author     Scot Breitenfeld <brtnfld@hdfgroup.org>    2015-06-16 13:49:28 (GMT)
committer  Scot Breitenfeld <brtnfld@hdfgroup.org>    2015-06-16 13:49:28 (GMT)
commit     48086667b56335a9344115c79e5de75fff2f4089 (patch)
tree       a0cf92f8e698cd689e9581ed46570c2dc18fbef2 /src
parent     133a5c36817c0645a8baa5478c436d7fa285503b (diff)
parent     afb85e30e5874454fe890bb06c1a8cc67d2dc245 (diff)
[svn-r27209] svn merge -r27186:27208 https://svn.hdfgroup.uiuc.edu/hdf5/trunk
Diffstat (limited to 'src')
-rw-r--r--  src/H5AC.c         3033
-rw-r--r--  src/H5ACpkg.h        19
-rw-r--r--  src/H5ACprivate.h    21
-rw-r--r--  src/H5C.c            83
-rw-r--r--  src/H5Cpkg.h       2052
-rw-r--r--  src/H5Cprivate.h    625
-rw-r--r--  src/H5Dchunk.c        8
-rw-r--r--  src/H5Dpkg.h          1
-rw-r--r--  src/H5Fint.c         17
9 files changed, 2442 insertions, 3417 deletions
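
Note: the bulk of the patch below moves the parallel-cache helpers in H5AC.c to the private double-underscore naming convention (H5AC_log_dirtied_entry becomes H5AC__log_dirtied_entry, and so on), trims their argument lists, and converts the trace-file hooks from a status-plus-out-parameter accessor to one that returns the FILE pointer directly. The stand-alone C sketch here only illustrates that accessor pattern change; toy_cache_t and the two get_trace_file_ptr_* helpers are stand-ins invented for the example, not the actual H5C prototypes.

/* Minimal sketch of the accessor change applied throughout this patch.
 * Compile as a normal C program; nothing here is HDF5 code. */
#include <stdio.h>

typedef struct {
    FILE *trace_file_ptr;      /* trace file attached to a (toy) cache */
} toy_cache_t;

/* Old-style accessor: status return plus out-parameter */
static int get_trace_file_ptr_old(const toy_cache_t *cache, FILE **fp_out)
{
    if(!cache || !fp_out)
        return -1;
    *fp_out = cache->trace_file_ptr;
    return 0;
}

/* New-style accessor: the pointer itself is the return value */
static FILE *get_trace_file_ptr_new(const toy_cache_t *cache)
{
    return cache ? cache->trace_file_ptr : NULL;
}

int main(void)
{
    toy_cache_t cache = { stderr };   /* pretend the trace file is stderr */
    FILE *fp = NULL;

    /* Before: two conditions (call succeeded, pointer non-NULL),
     * mirroring the removed H5C_get_trace_file_ptr(cache, &fp) >= 0 checks */
    if(get_trace_file_ptr_old(&cache, &fp) >= 0 && fp != NULL)
        fprintf(fp, "old-style trace call\n");

    /* After: a single test, as in the rewritten H5AC trace hooks */
    if(NULL != (fp = get_trace_file_ptr_new(&cache)))
        fprintf(fp, "new-style trace call\n");

    return 0;
}
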
diff --git a/src/H5AC.c b/src/H5AC.c
index c5466be..db5a06f 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -27,6 +27,10 @@
*-------------------------------------------------------------------------
*/
+/****************/
+/* Module Setup */
+/****************/
+
#define H5AC_PACKAGE /*suppress error about including H5ACpkg */
#define H5C_PACKAGE /*suppress error about including H5Cpkg */
#define H5F_PACKAGE /*suppress error about including H5Fpkg */
@@ -34,10 +38,9 @@
/* Interface initialization */
#define H5_INTERFACE_INIT_FUNC H5AC_init_interface
-#ifdef H5_HAVE_PARALLEL
-#include <mpi.h>
-#endif /* H5_HAVE_PARALLEL */
-
+/***********/
+/* Headers */
+/***********/
#include "H5private.h" /* Generic Functions */
#include "H5ACpkg.h" /* Metadata cache */
#include "H5Cpkg.h" /* Cache */
@@ -51,13 +54,16 @@
#include "H5Pprivate.h" /* Property lists */
-#ifdef H5_HAVE_PARALLEL
+/****************/
+/* Local Macros */
+/****************/
-/* Declare a free list to manage the H5AC_aux_t struct */
-H5FL_DEFINE_STATIC(H5AC_aux_t);
-#endif /* H5_HAVE_PARALLEL */
+/******************/
+/* Local Typedefs */
+/******************/
+#ifdef H5_HAVE_PARALLEL
/****************************************************************************
*
* structure H5AC_slist_entry_t
@@ -69,143 +75,135 @@ H5FL_DEFINE_STATIC(H5AC_aux_t);
* allocated structure to store these offsets in. This structure serves
* that purpose. Its fields are as follows:
*
- * magic: Unsigned 32 bit integer always set to
- * H5AC__H5AC_SLIST_ENTRY_T_MAGIC. This field is used to
- * validate pointers to instances of H5AC_slist_entry_t.
- *
* addr: file offset of a metadata entry. Entries are added to this
* list (if they aren't there already) when they are marked
* dirty in an unprotect, inserted, or moved. They are
* removed when they appear in a clean entries broadcast.
*
****************************************************************************/
-
-#ifdef H5_HAVE_PARALLEL
-
-#define H5AC__H5AC_SLIST_ENTRY_T_MAGIC 0x00D0A02
-
typedef struct H5AC_slist_entry_t
{
- uint32_t magic;
-
haddr_t addr;
} H5AC_slist_entry_t;
-/* Declare a free list to manage the H5AC_slist_entry_t struct */
-H5FL_DEFINE_STATIC(H5AC_slist_entry_t);
+/* User data for address list building callbacks */
+typedef struct H5AC_addr_list_ud_t
+{
+ H5AC_aux_t * aux_ptr; /* 'Auxiliary' parallel cache info */
+ haddr_t * addr_buf_ptr; /* Array to store addresses */
+ int i; /* Counter for position in array */
+} H5AC_addr_list_ud_t;
+#endif /* H5_HAVE_PARALLEL */
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static herr_t H5AC__check_if_write_permitted(const H5F_t *f,
+ hbool_t *write_permitted_ptr);
+static herr_t H5AC__ext_config_2_int_config(H5AC_cache_config_t *ext_conf_ptr,
+ H5C_auto_size_ctl_t *int_conf_ptr);
+
+#ifdef H5_HAVE_PARALLEL
+static herr_t H5AC__broadcast_candidate_list(H5AC_t *cache_ptr,
+ int *num_entries_ptr, haddr_t **haddr_buf_ptr_ptr);
+static herr_t H5AC__broadcast_clean_list(H5AC_t *cache_ptr);
+static herr_t H5AC__construct_candidate_list(H5AC_t *cache_ptr,
+ H5AC_aux_t *aux_ptr, int sync_point_op);
+static herr_t H5AC__copy_candidate_list_to_buffer(const H5AC_t *cache_ptr,
+ int *num_entries_ptr, haddr_t **haddr_buf_ptr_ptr);
+static herr_t H5AC__flush_entries(H5F_t *f, hid_t dxpl_id);
+static herr_t H5AC__log_deleted_entry(const H5AC_info_t *entry_ptr);
+static herr_t H5AC__log_dirtied_entry(const H5AC_info_t *entry_ptr);
+static herr_t H5AC__log_flushed_entry(H5C_t *cache_ptr, haddr_t addr,
+ hbool_t was_dirty, unsigned flags);
+static herr_t H5AC__log_inserted_entry(const H5AC_info_t *entry_ptr);
+static herr_t H5AC__log_moved_entry(const H5F_t *f, haddr_t old_addr,
+ haddr_t new_addr);
+static herr_t H5AC__propagate_and_apply_candidate_list(H5F_t *f, hid_t dxpl_id);
+static herr_t H5AC__propagate_flushed_and_still_clean_entries_list(H5F_t *f,
+ hid_t dxpl_id);
+static herr_t H5AC__receive_haddr_list(MPI_Comm mpi_comm, int *num_entries_ptr,
+ haddr_t **haddr_buf_ptr_ptr);
+static herr_t H5AC__receive_candidate_list(const H5AC_t *cache_ptr,
+ int *num_entries_ptr, haddr_t **haddr_buf_ptr_ptr);
+static herr_t H5AC__receive_and_apply_clean_list(H5F_t *f, hid_t primary_dxpl_id,
+ hid_t secondary_dxpl_id);
+static herr_t H5AC__tidy_cache_0_lists(H5AC_t *cache_ptr, int num_candidates,
+ haddr_t *candidates_list_ptr);
+static herr_t H5AC__rsp__dist_md_write__flush(H5F_t *f, hid_t dxpl_id);
+static herr_t H5AC__rsp__dist_md_write__flush_to_min_clean(H5F_t *f, hid_t dxpl_id);
+static herr_t H5AC__rsp__p0_only__flush(H5F_t *f, hid_t dxpl_id);
+static herr_t H5AC__rsp__p0_only__flush_to_min_clean(H5F_t *f, hid_t dxpl_id);
+static herr_t H5AC__run_sync_point(H5F_t *f, hid_t dxpl_id, int sync_point_op);
#endif /* H5_HAVE_PARALLEL */
-/*
- * Private file-scope variables.
- */
+/*********************/
+/* Package Variables */
+/*********************/
+
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
/* Default dataset transfer property list for metadata I/O calls */
/* (Collective set, "block before metadata write" set and "library internal" set) */
/* (Global variable definition, declaration is in H5ACprivate.h also) */
-hid_t H5AC_dxpl_id=(-1);
+hid_t H5AC_dxpl_id = (-1);
/* Dataset transfer property list for independent metadata I/O calls */
/* (just "library internal" set - i.e. independent transfer mode) */
/* (Global variable definition, declaration is in H5ACprivate.h also) */
H5P_genplist_t *H5AC_ind_dxpl_g = NULL;
-hid_t H5AC_ind_dxpl_id=(-1);
+hid_t H5AC_ind_dxpl_id = (-1);
-/*
- * Private file-scope function declarations:
- */
-
-static herr_t H5AC_check_if_write_permitted(const H5F_t *f,
- hid_t dxpl_id,
- hbool_t * write_permitted_ptr);
-
-static herr_t H5AC_ext_config_2_int_config(H5AC_cache_config_t * ext_conf_ptr,
- H5C_auto_size_ctl_t * int_conf_ptr);
+/*******************/
+/* Local Variables */
+/*******************/
#ifdef H5_HAVE_PARALLEL
-static herr_t H5AC_broadcast_candidate_list(H5AC_t * cache_ptr,
- int * num_entries_ptr,
- haddr_t ** haddr_buf_ptr_ptr);
-
-static herr_t H5AC_broadcast_clean_list(H5AC_t * cache_ptr);
-
-static herr_t H5AC_construct_candidate_list(H5AC_t * cache_ptr,
- H5AC_aux_t * aux_ptr,
- int sync_point_op);
-
-static herr_t H5AC_copy_candidate_list_to_buffer(H5AC_t * cache_ptr,
- int * num_entries_ptr,
- haddr_t ** haddr_buf_ptr_ptr,
- size_t * MPI_Offset_buf_size_ptr,
- MPI_Offset ** MPI_Offset_buf_ptr_ptr);
-
-static herr_t H5AC_flush_entries(H5F_t *f);
-
-static herr_t H5AC_log_deleted_entry(H5AC_t * cache_ptr,
- H5AC_info_t * entry_ptr,
- haddr_t addr,
- unsigned int flags);
-
-static herr_t H5AC_log_dirtied_entry(const H5AC_info_t * entry_ptr,
- haddr_t addr);
-
-static herr_t H5AC_log_flushed_entry(H5C_t * cache_ptr,
- haddr_t addr,
- hbool_t was_dirty,
- unsigned flags,
- int type_id);
-
-static herr_t H5AC_log_moved_entry(const H5F_t * f,
- haddr_t old_addr,
- haddr_t new_addr);
-
-static herr_t H5AC_log_inserted_entry(H5AC_t * cache_ptr,
- H5AC_info_t * entry_ptr);
-
-static herr_t H5AC_propagate_and_apply_candidate_list(H5F_t * f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr);
-
-static herr_t H5AC_propagate_flushed_and_still_clean_entries_list(H5F_t * f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr);
-
-static herr_t H5AC_receive_candidate_list(H5AC_t * cache_ptr,
- int * num_entries_ptr,
- haddr_t ** haddr_buf_ptr_ptr);
-
-static herr_t H5AC_receive_and_apply_clean_list(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- H5AC_t * cache_ptr);
-
-static herr_t H5AC_tidy_cache_0_lists(H5AC_t * cache_ptr,
- int num_candidates,
- haddr_t * candidates_list_ptr);
-
-herr_t H5AC_rsp__dist_md_write__flush(H5F_t *f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr);
-
-herr_t H5AC_rsp__dist_md_write__flush_to_min_clean(H5F_t *f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr);
-
-herr_t H5AC_rsp__p0_only__flush(H5F_t *f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr);
+/* Declare a free list to manage the H5AC_aux_t struct */
+H5FL_DEFINE_STATIC(H5AC_aux_t);
-herr_t H5AC_rsp__p0_only__flush_to_min_clean(H5F_t *f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr);
+/* Declare a free list to manage the H5AC_slist_entry_t struct */
+H5FL_DEFINE_STATIC(H5AC_slist_entry_t);
+#endif /* H5_HAVE_PARALLEL */
-static herr_t H5AC_run_sync_point(H5F_t *f,
- hid_t dxpl_id,
- int sync_point_op);
+static const char *H5AC_entry_type_names[H5AC_NTYPES] =
+{
+ "B-tree nodes",
+ "symbol table nodes",
+ "local heap prefixes",
+ "local heap data blocks",
+ "global heaps",
+ "object headers",
+ "object header chunks",
+ "v2 B-tree headers",
+ "v2 B-tree internal nodes",
+ "v2 B-tree leaf nodes",
+ "fractal heap headers",
+ "fractal heap direct blocks",
+ "fractal heap indirect blocks",
+ "free space headers",
+ "free space sections",
+ "shared OH message master table",
+ "shared OH message index",
+ "extensible array headers",
+ "extensible array index blocks",
+ "extensible array super blocks",
+ "extensible array data blocks",
+ "extensible array data block pages",
+ "fixed array headers",
+ "fixed array data block",
+ "fixed array data block pages",
+ "superblock",
+ "test entry" /* for testing only -- not used for actual files */
+};
-#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
@@ -232,7 +230,7 @@ H5AC_init(void)
done:
FUNC_LEAVE_NOAPI(ret_value)
-}
+} /* end H5AC_init() */
/*-------------------------------------------------------------------------
@@ -273,8 +271,8 @@ H5AC_init_interface(void)
/* Insert 'collective metadata write' property */
coll_meta_write = 1;
if(H5P_insert(xfer_plist, H5AC_COLLECTIVE_META_WRITE_NAME, H5AC_COLLECTIVE_META_WRITE_SIZE, &coll_meta_write,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't insert metadata cache dxpl property")
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "can't insert metadata cache dxpl property")
/* Get an ID for the independent H5AC dxpl */
if((H5AC_ind_dxpl_id = H5P_create_id(H5P_CLS_DATASET_XFER_g, FALSE)) < 0)
@@ -287,8 +285,8 @@ H5AC_init_interface(void)
/* Insert 'collective metadata write' property */
coll_meta_write = 0;
if(H5P_insert(H5AC_ind_dxpl_g, H5AC_COLLECTIVE_META_WRITE_NAME, H5AC_COLLECTIVE_META_WRITE_SIZE, &coll_meta_write,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't insert metadata cache dxpl property")
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "can't insert metadata cache dxpl property")
#else /* H5_HAVE_PARALLEL */
/* Sanity check */
HDassert(H5P_LST_DATASET_XFER_ID_g!=(-1));
@@ -297,7 +295,7 @@ H5AC_init_interface(void)
H5AC_ind_dxpl_id = H5P_DATASET_XFER_DEFAULT;
/* Get the property list objects for the IDs */
- if (NULL == (H5AC_ind_dxpl_g = (H5P_genplist_t *)H5I_object(H5AC_ind_dxpl_id)))
+ if(NULL == (H5AC_ind_dxpl_g = (H5P_genplist_t *)H5I_object(H5AC_ind_dxpl_id)))
HGOTO_ERROR(H5E_CACHE, H5E_BADATOM, FAIL, "can't get property list object")
#endif /* H5_HAVE_PARALLEL */
@@ -328,15 +326,14 @@ H5AC_term_interface(void)
FUNC_ENTER_NOAPI_NOINIT_NOERR
- if (H5_interface_initialize_g) {
+ if(H5_interface_initialize_g) {
#ifdef H5_HAVE_PARALLEL
if(H5AC_dxpl_id > 0 || H5AC_ind_dxpl_id > 0) {
/* Indicate more work to do */
n = 1; /* H5I */
/* Close H5AC dxpl */
- if(H5I_dec_ref(H5AC_dxpl_id) < 0 ||
- H5I_dec_ref(H5AC_ind_dxpl_id) < 0)
+ if(H5I_dec_ref(H5AC_dxpl_id) < 0 || H5I_dec_ref(H5AC_ind_dxpl_id) < 0)
H5E_clear_stack(NULL); /*ignore error*/
else {
/* Reset static IDs */
@@ -347,50 +344,22 @@ H5AC_term_interface(void)
H5_interface_initialize_g = 0;
} /* end else */
} /* end if */
- else
-#else /* H5_HAVE_PARALLEL */
+ else {
+#endif /* H5_HAVE_PARALLEL */
/* Reset static IDs */
- H5AC_dxpl_id=(-1);
- H5AC_ind_dxpl_id=(-1);
+ H5AC_dxpl_id = (-1);
+ H5AC_ind_dxpl_id = (-1);
+#ifdef H5_HAVE_PARALLEL
+ } /* end else */
#endif /* H5_HAVE_PARALLEL */
- /* Reset interface initialization flag */
- H5_interface_initialize_g = 0;
+
+ /* Reset interface initialization flag */
+ H5_interface_initialize_g = 0;
} /* end if */
FUNC_LEAVE_NOAPI(n)
} /* end H5AC_term_interface() */
-static const char * H5AC_entry_type_names[H5AC_NTYPES] =
-{
- "B-tree nodes",
- "symbol table nodes",
- "local heap prefixes",
- "local heap data blocks",
- "global heaps",
- "object headers",
- "object header chunks",
- "v2 B-tree headers",
- "v2 B-tree internal nodes",
- "v2 B-tree leaf nodes",
- "fractal heap headers",
- "fractal heap direct blocks",
- "fractal heap indirect blocks",
- "free space headers",
- "free space sections",
- "shared OH message master table",
- "shared OH message index",
- "extensible array headers",
- "extensible array index blocks",
- "extensible array super blocks",
- "extensible array data blocks",
- "extensible array data block pages",
- "fixed array headers",
- "fixed array data block",
- "fixed array data block pages",
- "superblock",
- "test entry" /* for testing only -- not used for actual files */
-};
-
/*-------------------------------------------------------------------------
* Function: H5AC_create
@@ -411,8 +380,7 @@ static const char * H5AC_entry_type_names[H5AC_NTYPES] =
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_create(const H5F_t *f,
- H5AC_cache_config_t *config_ptr)
+H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr)
{
#ifdef H5_HAVE_PARALLEL
char prefix[H5C__PREFIX_LEN] = "";
@@ -422,6 +390,7 @@ H5AC_create(const H5F_t *f,
FUNC_ENTER_NOAPI(FAIL)
+ /* Check arguments */
HDassert(f);
HDassert(NULL == f->shared->cache);
HDassert(config_ptr != NULL) ;
@@ -429,7 +398,7 @@ H5AC_create(const H5F_t *f,
HDcompile_assert(H5C__MAX_NUM_TYPE_IDS == H5AC_NTYPES);
if(H5AC_validate_config(config_ptr) < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Bad cache configuration")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad cache configuration")
#ifdef H5_HAVE_PARALLEL
if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) {
@@ -467,11 +436,8 @@ H5AC_create(const H5F_t *f,
aux_ptr->move_dirty_bytes_updates = 0;
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
aux_ptr->d_slist_ptr = NULL;
- aux_ptr->d_slist_len = 0;
aux_ptr->c_slist_ptr = NULL;
- aux_ptr->c_slist_len = 0;
aux_ptr->candidate_slist_ptr = NULL;
- aux_ptr->candidate_slist_len = 0;
aux_ptr->write_done = NULL;
aux_ptr->sync_point_done = NULL;
@@ -492,66 +458,50 @@ H5AC_create(const H5F_t *f,
if(NULL == (aux_ptr->candidate_slist_ptr = H5SL_create(H5SL_TYPE_HADDR, NULL)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create candidate entry list.")
- if(aux_ptr != NULL) {
- if(aux_ptr->mpi_rank == 0) {
+ if(aux_ptr != NULL)
+ if(aux_ptr->mpi_rank == 0)
f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE,
- H5AC__DEFAULT_MIN_CLEAN_SIZE,
- (H5AC_NTYPES - 1),
- (const char **)H5AC_entry_type_names,
- H5AC_check_if_write_permitted,
- TRUE,
- H5AC_log_flushed_entry,
- (void *)aux_ptr);
- } else {
+ H5AC__DEFAULT_MIN_CLEAN_SIZE, (H5AC_NTYPES - 1),
+ (const char **)H5AC_entry_type_names,
+ H5AC__check_if_write_permitted, TRUE, H5AC__log_flushed_entry,
+ (void *)aux_ptr);
+ else
f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE,
- H5AC__DEFAULT_MIN_CLEAN_SIZE,
- (H5AC_NTYPES - 1),
- (const char **)H5AC_entry_type_names,
- H5AC_check_if_write_permitted,
- TRUE,
- NULL,
- (void *)aux_ptr);
- }
- } else {
+ H5AC__DEFAULT_MIN_CLEAN_SIZE, (H5AC_NTYPES - 1),
+ (const char **)H5AC_entry_type_names,
+ H5AC__check_if_write_permitted, TRUE, NULL,
+ (void *)aux_ptr);
+ else
f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE,
- H5AC__DEFAULT_MIN_CLEAN_SIZE,
- (H5AC_NTYPES - 1),
- (const char **)H5AC_entry_type_names,
- H5AC_check_if_write_permitted,
- TRUE,
- NULL,
- NULL);
- }
- } else {
+ H5AC__DEFAULT_MIN_CLEAN_SIZE, (H5AC_NTYPES - 1),
+ (const char **)H5AC_entry_type_names,
+ H5AC__check_if_write_permitted, TRUE, NULL, NULL);
+ } /* end if */
+ else {
#endif /* H5_HAVE_PARALLEL */
/* The default max cache size and min clean size will frequently be
* overwritten shortly by the subsequent set resize config call.
* -- JRM
*/
f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE,
- H5AC__DEFAULT_MIN_CLEAN_SIZE,
- (H5AC_NTYPES - 1),
- (const char **)H5AC_entry_type_names,
- H5AC_check_if_write_permitted,
- TRUE,
- NULL,
- NULL);
+ H5AC__DEFAULT_MIN_CLEAN_SIZE, (H5AC_NTYPES - 1),
+ (const char **)H5AC_entry_type_names,
+ H5AC__check_if_write_permitted, TRUE, NULL, NULL);
#ifdef H5_HAVE_PARALLEL
- }
+ } /* end else */
#endif /* H5_HAVE_PARALLEL */
if(NULL == f->shared->cache)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed")
#ifdef H5_HAVE_PARALLEL
- if(aux_ptr != NULL) {
+ if(aux_ptr != NULL)
if(H5C_set_prefix(f->shared->cache, prefix) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "H5C_set_prefix() failed")
- } /* end if */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "H5C_set_prefix() failed")
#endif /* H5_HAVE_PARALLEL */
if(H5AC_set_cache_auto_resize_config(f->shared->cache, config_ptr) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "auto resize configuration failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "auto resize configuration failed")
done:
#ifdef H5_HAVE_PARALLEL
@@ -560,13 +510,10 @@ done:
if(aux_ptr != NULL) {
if(aux_ptr->d_slist_ptr != NULL)
H5SL_close(aux_ptr->d_slist_ptr);
-
if(aux_ptr->c_slist_ptr != NULL)
H5SL_close(aux_ptr->c_slist_ptr);
-
if(aux_ptr->candidate_slist_ptr != NULL)
H5SL_close(aux_ptr->candidate_slist_ptr);
-
aux_ptr->magic = 0;
aux_ptr = H5FL_FREE(H5AC_aux_t, aux_ptr);
} /* end if */
@@ -623,7 +570,7 @@ H5AC_dest(H5F_t *f, hid_t dxpl_id)
HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
/* Attempt to flush all entries from rank 0 & Bcast clean list to other ranks */
- if(H5AC_flush_entries(f) < 0)
+ if(H5AC__flush_entries(f, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush.")
#endif /* H5_HAVE_PARALLEL */
@@ -665,13 +612,9 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_expunge_entry(H5F_t *f,
- hid_t dxpl_id,
- const H5AC_class_t *type,
- haddr_t addr,
- unsigned flags)
+H5AC_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type,
+ haddr_t addr, unsigned flags)
{
- herr_t result;
#if H5AC__TRACE_FILE_ENABLED
char trace[128] = "";
FILE * trace_file_ptr = NULL;
@@ -680,6 +623,7 @@ H5AC_expunge_entry(H5F_t *f,
FUNC_ENTER_NOAPI(FAIL)
+ /* Sanity checks */
HDassert(f);
HDassert(f->shared);
HDassert(f->shared->cache);
@@ -696,41 +640,21 @@ H5AC_expunge_entry(H5F_t *f,
* necessary in the trace file. Write the return value to catch occult
* errors.
*/
- if ( ( cache_ptr != NULL ) &&
- ( H5C_get_trace_file_ptr(cache_ptr, &trace_file_ptr) >= 0 ) &&
- ( trace_file_ptr != NULL ) ) {
-
- sprintf(trace, "H5AC_expunge_entry 0x%lx %d",
- (unsigned long)addr,
- (int)(type->id));
- }
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr(cache_ptr)))
+ sprintf(trace, "%s 0x%lx %d", FUNC, (unsigned long)addr, (int)(type->id));
}
#endif /* H5AC__TRACE_FILE_ENABLED */
- result = H5C_expunge_entry(f,
- dxpl_id,
- H5AC_dxpl_id,
- type,
- addr,
- flags);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, \
- "H5C_expunge_entry() failed.")
- }
+ if(H5C_expunge_entry(f, dxpl_id, H5AC_dxpl_id, type, addr, flags) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "H5C_expunge_entry() failed.")
done:
-
#if H5AC__TRACE_FILE_ENABLED
- if ( trace_file_ptr != NULL ) {
-
+ if(trace_file_ptr != NULL)
HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
- }
#endif /* H5AC__TRACE_FILE_ENABLED */
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_expunge_entry() */
@@ -765,6 +689,7 @@ H5AC_flush(H5F_t *f, hid_t dxpl_id)
FUNC_ENTER_NOAPI(FAIL)
+ /* Sanity checks */
HDassert(f);
HDassert(f->shared);
HDassert(f->shared->cache);
@@ -773,21 +698,18 @@ H5AC_flush(H5F_t *f, hid_t dxpl_id)
/* For the flush, only the flags are really necessary in the trace file.
* Write the result to catch occult errors.
*/
- if((f != NULL) &&
- (f->shared != NULL) &&
- (f->shared->cache != NULL) &&
- (H5C_get_trace_file_ptr(f->shared->cache, &trace_file_ptr) >= 0) &&
- (trace_file_ptr != NULL))
- sprintf(trace, "H5AC_flush");
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr(cache_ptr)))
+ sprintf(trace, "%s", FUNC);
#endif /* H5AC__TRACE_FILE_ENABLED */
#ifdef H5_HAVE_PARALLEL
/* Attempt to flush all entries from rank 0 & Bcast clean list to other ranks */
- if(H5AC_flush_entries(f) < 0)
+ if(H5AC__flush_entries(f, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush.")
#endif /* H5_HAVE_PARALLEL */
/* Flush the cache */
+ /* (Again, in parallel - writes out the superblock) */
if(H5C_flush_cache(f, dxpl_id, H5AC_dxpl_id, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache.")
@@ -808,12 +730,12 @@ done:
* cache contains an entry at that location. If it does,
* also determine whether the entry is dirty, protected,
* pinned, etc. and return that information to the caller
- * in *status_ptr.
+ * in *status.
*
* If the specified entry doesn't exist, set *status_ptr
* to zero.
*
- * On error, the value of *status_ptr is undefined.
+ * On error, the value of *status is undefined.
*
* Return: Non-negative on success/Negative on failure
*
@@ -823,44 +745,40 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_get_entry_status(const H5F_t *f,
- haddr_t addr,
- unsigned * status_ptr)
+H5AC_get_entry_status(const H5F_t *f, haddr_t addr, unsigned *status)
{
- hbool_t in_cache;
- hbool_t is_dirty;
- hbool_t is_protected;
- hbool_t is_pinned;
- hbool_t is_flush_dep_child;
- hbool_t is_flush_dep_parent;
- size_t entry_size;
- unsigned status = 0;
+ hbool_t in_cache; /* Entry @ addr is in the cache */
+ hbool_t is_dirty; /* Entry @ addr is in the cache and dirty */
+ hbool_t is_protected; /* Entry @ addr is in the cache and protected */
+ hbool_t is_pinned; /* Entry @ addr is in the cache and pinned */
+ hbool_t is_flush_dep_child; /* Entry @ addr is in the cache and is a flush dependency child */
+ hbool_t is_flush_dep_parent; /* Entry @ addr is in the cache and is a flush dependency parent */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- if((f == NULL) || (!H5F_addr_defined(addr)) || (status_ptr == NULL))
+ if((f == NULL) || (!H5F_addr_defined(addr)) || (status == NULL))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad param(s) on entry.")
- if(H5C_get_entry_status(f, addr, &entry_size, &in_cache, &is_dirty,
+ if(H5C_get_entry_status(f, addr, NULL, &in_cache, &is_dirty,
&is_protected, &is_pinned, &is_flush_dep_parent, &is_flush_dep_child) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_entry_status() failed.")
if(in_cache) {
- status |= H5AC_ES__IN_CACHE;
+ *status |= H5AC_ES__IN_CACHE;
if(is_dirty)
- status |= H5AC_ES__IS_DIRTY;
+ *status |= H5AC_ES__IS_DIRTY;
if(is_protected)
- status |= H5AC_ES__IS_PROTECTED;
+ *status |= H5AC_ES__IS_PROTECTED;
if(is_pinned)
- status |= H5AC_ES__IS_PINNED;
+ *status |= H5AC_ES__IS_PINNED;
if(is_flush_dep_parent)
- status |= H5AC_ES__IS_FLUSH_DEP_PARENT;
+ *status |= H5AC_ES__IS_FLUSH_DEP_PARENT;
if(is_flush_dep_child)
- status |= H5AC_ES__IS_FLUSH_DEP_CHILD;
+ *status |= H5AC_ES__IS_FLUSH_DEP_CHILD;
} /* end if */
-
- *status_ptr = status;
+ else
+ *status = 0;
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -895,6 +813,7 @@ H5AC_insert_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t add
FUNC_ENTER_NOAPI(FAIL)
+ /* Sanity checks */
HDassert(f);
HDassert(f->shared);
HDassert(f->shared->cache);
@@ -916,17 +835,9 @@ H5AC_insert_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t add
* Note that some data is not available right now -- put what we can
* in the trace buffer now, and fill in the rest at the end.
*/
- if ( ( f != NULL ) &&
- ( f->shared != NULL ) &&
- ( f->shared->cache != NULL ) &&
- ( H5C_get_trace_file_ptr(f->shared->cache, &trace_file_ptr) >= 0) &&
- ( trace_file_ptr != NULL ) ) {
-
- sprintf(trace, "H5AC_insert_entry 0x%lx %d 0x%x",
- (unsigned long)addr,
- type->id,
- flags);
- }
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr(cache_ptr)))
+ sprintf(trace, "%s 0x%lx %d 0x%x", FUNC, (unsigned long)addr, type->id,
+ flags);
#endif /* H5AC__TRACE_FILE_ENABLED */
/* Insert entry into metadata cache */
@@ -934,10 +845,9 @@ H5AC_insert_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t add
HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C_insert_entry() failed")
#if H5AC__TRACE_FILE_ENABLED
- if(trace_file_ptr != NULL) {
+ if(trace_file_ptr != NULL)
/* make note of the entry size */
trace_entry_size = ((H5C_cache_entry_t *)thing)->size;
- }
#endif /* H5AC__TRACE_FILE_ENABLED */
#ifdef H5_HAVE_PARALLEL
@@ -946,12 +856,12 @@ H5AC_insert_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t add
if(NULL != (aux_ptr = (H5AC_aux_t *)f->shared->cache->aux_ptr)) {
/* Log the new entry */
- if(H5AC_log_inserted_entry(f->shared->cache, (H5AC_info_t *)thing) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5AC_log_inserted_entry() failed")
+ if(H5AC__log_inserted_entry((H5AC_info_t *)thing) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5AC__log_inserted_entry() failed")
/* Check if we should try to flush */
if(aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold)
- if(H5AC_run_sync_point(f, H5AC_dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
+ if(H5AC__run_sync_point(f, H5AC_dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point.")
} /* end if */
}
@@ -959,11 +869,8 @@ H5AC_insert_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t add
done:
#if H5AC__TRACE_FILE_ENABLED
- if(trace_file_ptr != NULL) {
- HDfprintf(trace_file_ptr, "%s %d %d\n", trace,
- (int)trace_entry_size,
- (int)ret_value);
- }
+ if(trace_file_ptr != NULL)
+ HDfprintf(trace_file_ptr, "%s %d %d\n", trace, (int)trace_entry_size, (int)ret_value);
#endif /* H5AC__TRACE_FILE_ENABLED */
FUNC_LEAVE_NOAPI(ret_value)
@@ -1002,8 +909,7 @@ H5AC_mark_entry_dirty(void *thing)
* is really necessary in the trace file. Write the result to catch
* occult errors.
*/
- if((H5C_get_trace_file_ptr_from_entry((const H5C_cache_entry_t *) thing, &trace_file_ptr) >= 0) &&
- (NULL != trace_file_ptr))
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr_from_entry(thing)))
sprintf(trace, "%s 0x%lx", FUNC,
(unsigned long)(((H5C_cache_entry_t *)thing)->addr));
#endif /* H5AC__TRACE_FILE_ENABLED */
@@ -1017,10 +923,9 @@ H5AC_mark_entry_dirty(void *thing)
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
if((!entry_ptr->is_dirty) && (!entry_ptr->is_protected) &&
- (entry_ptr->is_pinned) && (NULL != cache_ptr->aux_ptr)) {
- if(H5AC_log_dirtied_entry(entry_ptr, entry_ptr->addr) < 0)
+ (entry_ptr->is_pinned) && (NULL != cache_ptr->aux_ptr))
+ if(H5AC__log_dirtied_entry(entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "can't log dirtied entry")
- } /* end if */
}
#endif /* H5_HAVE_PARALLEL */
@@ -1059,12 +964,13 @@ H5AC_move_entry(H5F_t *f, const H5AC_class_t *type, haddr_t old_addr, haddr_t ne
FILE * trace_file_ptr = NULL;
#endif /* H5AC__TRACE_FILE_ENABLED */
#ifdef H5_HAVE_PARALLEL
- H5AC_aux_t * aux_ptr;
+ H5AC_aux_t *aux_ptr;
#endif /* H5_HAVE_PARALLEL */
- herr_t ret_value=SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
+ /* Sanity checks */
HDassert(f);
HDassert(f->shared->cache);
HDassert(type);
@@ -1077,25 +983,16 @@ H5AC_move_entry(H5F_t *f, const H5AC_class_t *type, haddr_t old_addr, haddr_t ne
* necessary in the trace file. Include the type id so we don't have to
* look it up. Also write the result to catch occult errors.
*/
- if ( ( f != NULL ) &&
- ( f->shared != NULL ) &&
- ( f->shared->cache != NULL ) &&
- ( H5C_get_trace_file_ptr(f->shared->cache, &trace_file_ptr) >= 0) &&
- ( trace_file_ptr != NULL ) ) {
-
- sprintf(trace, "H5AC_move_entry 0x%lx 0x%lx %d",
- (unsigned long)old_addr,
- (unsigned long)new_addr,
- (int)(type->id));
- }
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr(cache_ptr)))
+ sprintf(trace, "%s 0x%lx 0x%lx %d", FUNC, (unsigned long)old_addr,
+ (unsigned long)new_addr, (int)(type->id));
#endif /* H5AC__TRACE_FILE_ENABLED */
#ifdef H5_HAVE_PARALLEL
/* Log moving the entry */
- if(NULL != (aux_ptr = (H5AC_aux_t *)f->shared->cache->aux_ptr)) {
- if(H5AC_log_moved_entry(f, old_addr, new_addr) < 0)
+ if(NULL != (aux_ptr = (H5AC_aux_t *)f->shared->cache->aux_ptr))
+ if(H5AC__log_moved_entry(f, old_addr, new_addr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "can't log moved entry")
- } /* end if */
#endif /* H5_HAVE_PARALLEL */
if(H5C_move_entry(f->shared->cache, type, old_addr, new_addr) < 0)
@@ -1103,10 +1000,9 @@ H5AC_move_entry(H5F_t *f, const H5AC_class_t *type, haddr_t old_addr, haddr_t ne
#ifdef H5_HAVE_PARALLEL
/* Check if we should try to flush */
- if(NULL != aux_ptr && aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold) {
- if(H5AC_run_sync_point(f, H5AC_dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
+ if(NULL != aux_ptr && aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold)
+ if(H5AC__run_sync_point(f, H5AC_dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point.")
- } /* end if */
#endif /* H5_HAVE_PARALLEL */
done:
@@ -1150,8 +1046,7 @@ H5AC_pin_protected_entry(void *thing)
/* For the pin protected entry call, only the addr is really necessary
* in the trace file. Also write the result to catch occult errors.
*/
- if((H5C_get_trace_file_ptr_from_entry((const H5C_cache_entry_t *)thing, &trace_file_ptr) >= 0) &&
- (NULL != trace_file_ptr))
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr_from_entry(thing)))
sprintf(trace, "%s 0x%lx", FUNC,
(unsigned long)(((H5C_cache_entry_t *)thing)->addr));
#endif /* H5AC__TRACE_FILE_ENABLED */
@@ -1198,10 +1093,8 @@ H5AC_create_flush_dependency(void * parent_thing, void * child_thing)
HDassert(child_thing);
#if H5AC__TRACE_FILE_ENABLED
- if((H5C_get_trace_file_ptr_from_entry((H5C_cache_entry_t *)parent_thing, &trace_file_ptr) >= 0) &&
- (NULL != trace_file_ptr))
- sprintf(trace, "%s %lx %lx",
- FUNC,
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr_from_entry(parent_thing)))
+ sprintf(trace, "%s %lx %lx", FUNC,
(unsigned long)(((H5C_cache_entry_t *)parent_thing)->addr),
(unsigned long)(((H5C_cache_entry_t *)child_thing)->addr));
#endif /* H5AC__TRACE_FILE_ENABLED */
@@ -1244,15 +1137,11 @@ done:
*-------------------------------------------------------------------------
*/
void *
-H5AC_protect(H5F_t *f,
- hid_t dxpl_id,
- const H5AC_class_t *type,
- haddr_t addr,
- void *udata,
- H5AC_protect_t rw)
+H5AC_protect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
+ void *udata, H5AC_protect_t rw)
{
unsigned protect_flags = H5C__NO_FLAGS_SET;
- void * thing = (void *)NULL;
+ void * thing; /* Pointer to native data structure for entry */
#if H5AC__TRACE_FILE_ENABLED
char trace[128] = "";
size_t trace_entry_size = 0;
@@ -1262,7 +1151,7 @@ H5AC_protect(H5F_t *f,
FUNC_ENTER_NOAPI(NULL)
- /* check args */
+ /* Sanity check */
HDassert(f);
HDassert(f->shared);
HDassert(f->shared->cache);
@@ -1281,11 +1170,7 @@ H5AC_protect(H5F_t *f,
* sanity check. Also indicate whether the call was successful to
* catch occult errors.
*/
- if ( ( f != NULL ) &&
- ( f->shared != NULL ) &&
- ( f->shared->cache != NULL ) &&
- ( H5C_get_trace_file_ptr(f->shared->cache, &trace_file_ptr) >= 0) &&
- ( trace_file_ptr != NULL ) ) {
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr(cache_ptr))) {
const char * rw_string;
@@ -1302,53 +1187,34 @@ H5AC_protect(H5F_t *f,
rw_string = "???";
}
- sprintf(trace, "H5AC_protect 0x%lx %d %s",
- (unsigned long)addr,
- (int)(type->id),
- rw_string);
+ sprintf(trace, "%s 0x%lx %d %s", FUNC, (unsigned long)addr,
+ (int)(type->id), rw_string);
}
#endif /* H5AC__TRACE_FILE_ENABLED */
if ( rw == H5AC_READ )
protect_flags |= H5C__READ_ONLY_FLAG;
- thing = H5C_protect(f,
- dxpl_id,
- H5AC_dxpl_id,
- type,
- addr,
- udata,
- protect_flags);
-
- if ( thing == NULL ) {
+ if(NULL == (thing = H5C_protect(f, dxpl_id, H5AC_dxpl_id, type, addr, udata, protect_flags)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C_protect() failed.")
- }
#if H5AC__TRACE_FILE_ENABLED
- if ( trace_file_ptr != NULL ) {
-
- /* make note of the entry size */
+ if(trace_file_ptr != NULL)
+ /* Make note of the entry size */
trace_entry_size = ((H5C_cache_entry_t *)thing)->size;
- }
#endif /* H5AC__TRACE_FILE_ENABLED */
/* Set return value */
ret_value = thing;
done:
-
#if H5AC__TRACE_FILE_ENABLED
- if ( trace_file_ptr != NULL ) {
-
- HDfprintf(trace_file_ptr, "%s %d %d\n", trace,
- (int)trace_entry_size,
- (int)(ret_value != NULL));
- }
+ if(trace_file_ptr != NULL)
+ HDfprintf(trace_file_ptr, "%s %d %d\n", trace, (int)trace_entry_size, (int)(ret_value != NULL));
#endif /* H5AC__TRACE_FILE_ENABLED */
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_protect() */
@@ -1383,8 +1249,7 @@ H5AC_resize_entry(void *thing, size_t new_size)
* really necessary in the trace file. Write the result to catch
* occult errors.
*/
- if((H5C_get_trace_file_ptr_from_entry((H5C_cache_entry_t *)thing, &trace_file_ptr) >= 0) &&
- (NULL != trace_file_ptr))
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr_from_entry(thing)))
sprintf(trace, "%s 0x%lx %d", FUNC,
(unsigned long)(((H5C_cache_entry_t *)thing)->addr),
(int)new_size);
@@ -1401,10 +1266,9 @@ H5AC_resize_entry(void *thing, size_t new_size)
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- if((!entry_ptr->is_dirty) && (NULL != cache_ptr->aux_ptr)) {
- if(H5AC_log_dirtied_entry(entry_ptr, entry_ptr->addr) < 0)
+ if((!entry_ptr->is_dirty) && (NULL != cache_ptr->aux_ptr))
+ if(H5AC__log_dirtied_entry(entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "can't log dirtied entry")
- } /* end if */
}
#endif /* H5_HAVE_PARALLEL */
@@ -1449,8 +1313,7 @@ H5AC_unpin_entry(void *thing)
/* For the unpin entry call, only the addr is really necessary
* in the trace file. Also write the result to catch occult errors.
*/
- if((H5C_get_trace_file_ptr_from_entry((H5C_cache_entry_t *)thing, &trace_file_ptr) >= 0) &&
- (NULL != trace_file_ptr))
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr_from_entry(thing)))
sprintf(trace, "%s 0x%lx", FUNC,
(unsigned long)(((H5C_cache_entry_t *)thing)->addr));
#endif /* H5AC__TRACE_FILE_ENABLED */
@@ -1496,10 +1359,8 @@ H5AC_destroy_flush_dependency(void * parent_thing, void * child_thing)
HDassert(child_thing);
#if H5AC__TRACE_FILE_ENABLED
- if((H5C_get_trace_file_ptr_from_entry((H5C_cache_entry_t *)parent_thing, &trace_file_ptr) >= 0) &&
- (NULL != trace_file_ptr))
- sprintf(trace, "%s %llx %llx",
- FUNC,
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr_from_entry(parent_thing)))
+ sprintf(trace, "%s %llx %llx", FUNC,
(unsigned long long)(((H5C_cache_entry_t *)parent_thing)->addr),
(unsigned long long)(((H5C_cache_entry_t *)child_thing)->addr));
#endif /* H5AC__TRACE_FILE_ENABLED */
@@ -1572,6 +1433,7 @@ H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
FUNC_ENTER_NOAPI(FAIL)
+ /* Sanity checks */
HDassert(f);
HDassert(f->shared);
HDassert(f->shared->cache);
@@ -1588,21 +1450,13 @@ H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
* new size are really necessary in the trace file. Write the return
* value to catch occult errors.
*/
- if ( ( f != NULL ) &&
- ( f->shared != NULL ) &&
- ( f->shared->cache != NULL ) &&
- ( H5C_get_trace_file_ptr(f->shared->cache, &trace_file_ptr) >= 0) &&
- ( trace_file_ptr != NULL ) ) {
-
- sprintf(trace, "H5AC_unprotect 0x%lx %d",
- (unsigned long)addr,
- (int)(type->id));
- }
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr(cache_ptr)))
+ sprintf(trace, "%s 0x%lx %d", FUNC, (unsigned long)addr, (int)(type->id));
#endif /* H5AC__TRACE_FILE_ENABLED */
- dirtied = (hbool_t)( ( (flags & H5AC__DIRTIED_FLAG) == H5AC__DIRTIED_FLAG ) ||
- ( ((H5AC_info_t *)thing)->dirtied ) );
- deleted = (hbool_t)( (flags & H5C__DELETED_FLAG) == H5C__DELETED_FLAG );
+ dirtied = (hbool_t)(((flags & H5AC__DIRTIED_FLAG) == H5AC__DIRTIED_FLAG) ||
+ (((H5AC_info_t *)thing)->dirtied));
+ deleted = (hbool_t)((flags & H5C__DELETED_FLAG) == H5C__DELETED_FLAG);
/* Check if the size changed out from underneath us, if we're not deleting
* the entry.
@@ -1611,24 +1465,21 @@ H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
size_t curr_size = 0;
if((type->size)(f, thing, &curr_size) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGETSIZE, FAIL, "Can't get size of thing")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGETSIZE, FAIL, "Can't get size of thing")
if(((H5AC_info_t *)thing)->size != curr_size)
HGOTO_ERROR(H5E_CACHE, H5E_BADSIZE, FAIL, "size of entry changed")
} /* end if */
#ifdef H5_HAVE_PARALLEL
- if((dirtied) && (((H5AC_info_t *)thing)->is_dirty == FALSE) &&
- (NULL != (aux_ptr = (H5AC_aux_t *)f->shared->cache->aux_ptr))) {
- if(H5AC_log_dirtied_entry((H5AC_info_t *)thing, addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "can't log dirtied entry")
- } /* end if */
+ if(NULL != (aux_ptr = (H5AC_aux_t *)f->shared->cache->aux_ptr)) {
+ if(dirtied && ((H5AC_info_t *)thing)->is_dirty == FALSE)
+ if(H5AC__log_dirtied_entry((H5AC_info_t *)thing) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "can't log dirtied entry")
- if((deleted) &&
- (NULL != (aux_ptr = (H5AC_aux_t *)(f->shared->cache->aux_ptr))) &&
- (aux_ptr->mpi_rank == 0)) {
- if(H5AC_log_deleted_entry(f->shared->cache, (H5AC_info_t *)thing, addr, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "H5AC_log_deleted_entry() failed.")
+ if(deleted && aux_ptr->mpi_rank == 0)
+ if(H5AC__log_deleted_entry((H5AC_info_t *)thing) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "H5AC__log_deleted_entry() failed.")
} /* end if */
#endif /* H5_HAVE_PARALLEL */
@@ -1637,17 +1488,15 @@ H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
#ifdef H5_HAVE_PARALLEL
/* Check if we should try to flush */
- if((aux_ptr != NULL) && (aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold)) {
- if(H5AC_run_sync_point(f, H5AC_dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
+ if((aux_ptr != NULL) && (aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold))
+ if(H5AC__run_sync_point(f, H5AC_dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point.")
- } /* end if */
#endif /* H5_HAVE_PARALLEL */
done:
#if H5AC__TRACE_FILE_ENABLED
if(trace_file_ptr != NULL)
- HDfprintf(trace_file_ptr, "%s 0x%x %d\n",
- trace, (unsigned)flags, (int)ret_value);
+ HDfprintf(trace_file_ptr, "%s 0x%x %d\n", trace, (unsigned)flags, (int)ret_value);
#endif /* H5AC__TRACE_FILE_ENABLED */
FUNC_LEAVE_NOAPI(ret_value)
@@ -1678,12 +1527,11 @@ H5AC_set_sync_point_done_callback(H5C_t * cache_ptr,
FUNC_ENTER_NOAPI_NOINIT_NOERR
+ /* Sanity checks */
HDassert(cache_ptr && (cache_ptr->magic == H5C__H5C_T_MAGIC));
-
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
aux_ptr->sync_point_done = sync_point_done;
@@ -1708,17 +1556,15 @@ H5AC_set_sync_point_done_callback(H5C_t * cache_ptr,
*/
#ifdef H5_HAVE_PARALLEL
herr_t
-H5AC_set_write_done_callback(H5C_t * cache_ptr,
- void (* write_done)(void))
+H5AC_set_write_done_callback(H5C_t * cache_ptr, void (* write_done)(void))
{
H5AC_aux_t * aux_ptr;
FUNC_ENTER_NOAPI_NOINIT_NOERR
+ /* Sanity checks */
HDassert(cache_ptr && (cache_ptr->magic == H5C__H5C_T_MAGIC));
-
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
HDassert( aux_ptr != NULL );
HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
@@ -1728,6 +1574,7 @@ H5AC_set_write_done_callback(H5C_t * cache_ptr,
} /* H5AC_set_write_done_callback() */
#endif /* H5_HAVE_PARALLEL */
+#ifndef NDEBUG /* debugging functions */
/*-------------------------------------------------------------------------
* Function: H5AC_stats
@@ -1744,10 +1591,9 @@ H5AC_set_write_done_callback(H5C_t * cache_ptr,
herr_t
H5AC_stats(const H5F_t *f)
{
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+ /* Sanity checks */
HDassert(f);
HDassert(f->shared);
HDassert(f->shared->cache);
@@ -1755,8 +1601,7 @@ H5AC_stats(const H5F_t *f)
/* at present, this can't fail */
(void)H5C_stats(f->shared->cache, H5F_OPEN_NAME(f), FALSE);
-done:
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5AC_stats() */
@@ -1780,18 +1625,18 @@ H5AC_dump_cache(const H5F_t *f)
FUNC_ENTER_NOAPI(FAIL)
+ /* Sanity checks */
HDassert(f);
HDassert(f->shared);
HDassert(f->shared->cache);
- if ( H5C_dump_cache(f->shared->cache, H5F_OPEN_NAME(f)) < 0 ) {
-
+ if(H5C_dump_cache(f->shared->cache, H5F_OPEN_NAME(f)) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_dump_cache() failed.")
- }
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_dump_cache() */
+#endif /* NDEBUG */
/*-------------------------------------------------------------------------
@@ -1807,59 +1652,36 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_get_cache_auto_resize_config(const H5AC_t * cache_ptr,
- H5AC_cache_config_t *config_ptr)
+H5AC_get_cache_auto_resize_config(const H5AC_t *cache_ptr,
+ H5AC_cache_config_t *config_ptr)
{
- herr_t result;
- herr_t ret_value = SUCCEED; /* Return value */
- hbool_t evictions_enabled;
H5C_auto_size_ctl_t internal_config;
+ hbool_t evictions_enabled;
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- if ( ( cache_ptr == NULL )
- ||
+ /* Check args */
+ if((cache_ptr == NULL) ||
#ifdef H5_HAVE_PARALLEL
- ( ( cache_ptr->aux_ptr != NULL )
- &&
- ( ((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic
- !=
- H5AC__H5AC_AUX_T_MAGIC
- )
- )
- ||
+ ((cache_ptr->aux_ptr != NULL) &&
+ (((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic != H5AC__H5AC_AUX_T_MAGIC))
+ ||
#endif /* H5_HAVE_PARALLEL */
- ( config_ptr == NULL )
- ||
- ( config_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION )
- )
- {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Bad cache_ptr or config_ptr on entry.")
-
- }
-
- result = H5C_get_cache_auto_resize_config((const H5C_t *)cache_ptr,
- &internal_config);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_get_cache_auto_resize_config() failed.")
- }
+ (config_ptr == NULL) || (config_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr or config_ptr on entry.")
+ /* Retrieve the configuration */
+ if(H5C_get_cache_auto_resize_config((const H5C_t *)cache_ptr, &internal_config) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_cache_auto_resize_config() failed.")
if(H5C_get_evictions_enabled((const H5C_t *)cache_ptr, &evictions_enabled) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_resize_enabled() failed.")
- if ( internal_config.rpt_fcn == NULL ) {
-
+ /* Set the information to return */
+ if(internal_config.rpt_fcn == NULL)
config_ptr->rpt_fcn_enabled = FALSE;
-
- } else {
-
+ else
config_ptr->rpt_fcn_enabled = TRUE;
- }
-
config_ptr->open_trace_file = FALSE;
config_ptr->close_trace_file = FALSE;
config_ptr->trace_file_name[0] = '\0';
@@ -1883,35 +1705,24 @@ H5AC_get_cache_auto_resize_config(const H5AC_t * cache_ptr,
config_ptr->decrement = internal_config.decrement;
config_ptr->apply_max_decrement = internal_config.apply_max_decrement;
config_ptr->max_decrement = internal_config.max_decrement;
- config_ptr->epochs_before_eviction =
- (int)(internal_config.epochs_before_eviction);
+ config_ptr->epochs_before_eviction = (int)(internal_config.epochs_before_eviction);
config_ptr->apply_empty_reserve = internal_config.apply_empty_reserve;
config_ptr->empty_reserve = internal_config.empty_reserve;
-
#ifdef H5_HAVE_PARALLEL
- if ( cache_ptr->aux_ptr != NULL ) {
-
- config_ptr->dirty_bytes_threshold =
- ((H5AC_aux_t *)(cache_ptr->aux_ptr))->dirty_bytes_threshold;
- config_ptr->metadata_write_strategy =
- ((H5AC_aux_t *)(cache_ptr->aux_ptr))->metadata_write_strategy;
-
- } else {
+ if(cache_ptr->aux_ptr != NULL) {
+ config_ptr->dirty_bytes_threshold = ((H5AC_aux_t *)(cache_ptr->aux_ptr))->dirty_bytes_threshold;
+ config_ptr->metadata_write_strategy = ((H5AC_aux_t *)(cache_ptr->aux_ptr))->metadata_write_strategy;
+ } /* end if */
+ else {
#endif /* H5_HAVE_PARALLEL */
-
- config_ptr->dirty_bytes_threshold =
- H5AC__DEFAULT_DIRTY_BYTES_THRESHOLD;
- config_ptr->metadata_write_strategy =
- H5AC__DEFAULT_METADATA_WRITE_STRATEGY;
-
+ config_ptr->dirty_bytes_threshold = H5AC__DEFAULT_DIRTY_BYTES_THRESHOLD;
+ config_ptr->metadata_write_strategy = H5AC__DEFAULT_METADATA_WRITE_STRATEGY;
#ifdef H5_HAVE_PARALLEL
- }
+ } /* end else */
#endif /* H5_HAVE_PARALLEL */
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_get_cache_auto_resize_config() */
@@ -1928,33 +1739,19 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_get_cache_size(H5AC_t * cache_ptr,
- size_t * max_size_ptr,
- size_t * min_clean_size_ptr,
- size_t * cur_size_ptr,
- int32_t * cur_num_entries_ptr)
+H5AC_get_cache_size(H5AC_t *cache_ptr, size_t *max_size_ptr, size_t *min_clean_size_ptr,
+ size_t *cur_size_ptr, int32_t *cur_num_entries_ptr)
{
- herr_t result;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- result = H5C_get_cache_size((H5C_t *)cache_ptr,
- max_size_ptr,
- min_clean_size_ptr,
- cur_size_ptr,
- cur_num_entries_ptr);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_get_cache_size() failed.")
- }
+ if(H5C_get_cache_size((H5C_t *)cache_ptr, max_size_ptr, min_clean_size_ptr,
+ cur_size_ptr, cur_num_entries_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_cache_size() failed.")
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_get_cache_size() */
@@ -1971,7 +1768,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_get_cache_hit_rate(H5AC_t * cache_ptr, double * hit_rate_ptr)
+H5AC_get_cache_hit_rate(H5AC_t *cache_ptr, double *hit_rate_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -2000,23 +1797,15 @@ done:
herr_t
H5AC_reset_cache_hit_rate_stats(H5AC_t * cache_ptr)
{
- herr_t result;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- result = H5C_reset_cache_hit_rate_stats((H5C_t *)cache_ptr);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_reset_cache_hit_rate_stats() failed.")
- }
+ if(H5C_reset_cache_hit_rate_stats((H5C_t *)cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats() failed.")
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_reset_cache_hit_rate_stats() */
@@ -2033,124 +1822,84 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr,
- H5AC_cache_config_t *config_ptr)
+H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr, H5AC_cache_config_t *config_ptr)
{
- herr_t result;
- herr_t ret_value = SUCCEED; /* Return value */
H5C_auto_size_ctl_t internal_config;
#if H5AC__TRACE_FILE_ENABLED
H5AC_cache_config_t trace_config = H5AC__DEFAULT_CACHE_CONFIG;
FILE * trace_file_ptr = NULL;
#endif /* H5AC__TRACE_FILE_ENABLED */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- HDassert( cache_ptr );
+ /* Sanity checks */
+ HDassert(cache_ptr);
#if H5AC__TRACE_FILE_ENABLED
/* Make note of the new configuration. Don't look up the trace file
* pointer, as that may change before we use it.
*/
- if ( config_ptr != NULL ) {
-
+ if(config_ptr != NULL)
trace_config = *config_ptr;
-
- }
#endif /* H5AC__TRACE_FILE_ENABLED */
- if ( ( cache_ptr == NULL )
+ if((cache_ptr == NULL)
#ifdef H5_HAVE_PARALLEL
- ||
- ( ( cache_ptr->aux_ptr != NULL )
- &&
- (
- ((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic
- !=
- H5AC__H5AC_AUX_T_MAGIC
- )
- )
+ || ((cache_ptr->aux_ptr != NULL) &&
+ (((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic != H5AC__H5AC_AUX_T_MAGIC))
#endif /* H5_HAVE_PARALLEL */
- ) {
-
+ )
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "bad cache_ptr on entry.")
- }
-
- result = H5AC_validate_config(config_ptr);
-
- if ( result != SUCCEED ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Bad cache configuration");
- }
-
- if ( config_ptr->open_trace_file ) {
-
- FILE * file_ptr = NULL;
-
- if ( H5C_get_trace_file_ptr(cache_ptr, &file_ptr) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_get_trace_file_ptr() failed.")
- }
-
- if ( ( ! ( config_ptr->close_trace_file ) ) &&
- ( file_ptr != NULL ) ) {
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "Trace file already open.")
- }
- }
-
- if ( config_ptr->close_trace_file ) {
+ /* Validate external configuration */
+ if(H5AC_validate_config(config_ptr) != SUCCEED)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad cache configuration");
- if ( H5AC_close_trace_file(cache_ptr) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5AC_close_trace_file() failed.")
- }
- }
+ if(config_ptr->open_trace_file) {
+ FILE * file_ptr;
- if ( config_ptr->open_trace_file ) {
+ if(NULL == (file_ptr = H5C_get_trace_file_ptr(cache_ptr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_trace_file_ptr() failed.")
- if ( H5AC_open_trace_file(cache_ptr, config_ptr->trace_file_name) < 0 )
- {
+ if((!(config_ptr->close_trace_file)) && (file_ptr != NULL))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Trace file already open.")
+ } /* end if */
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "H5AC_open_trace_file() failed.")
- }
- }
+ /* Close & reopen trace file, if requested */
+ if(config_ptr->close_trace_file)
+ if(H5AC_close_trace_file(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_close_trace_file() failed.")
+ if(config_ptr->open_trace_file)
+ if(H5AC_open_trace_file(cache_ptr, config_ptr->trace_file_name) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "H5AC_open_trace_file() failed.")
- if(H5AC_ext_config_2_int_config(config_ptr, &internal_config) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_ext_config_2_int_config() failed.")
+ /* Convert external configuration to internal representation */
+ if(H5AC__ext_config_2_int_config(config_ptr, &internal_config) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC__ext_config_2_int_config() failed.")
+ /* Set configuration */
if(H5C_set_cache_auto_resize_config(cache_ptr, &internal_config) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_set_cache_auto_resize_config() failed.")
-
if(H5C_set_evictions_enabled(cache_ptr, config_ptr->evictions_enabled) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_set_evictions_enabled() failed.")
#ifdef H5_HAVE_PARALLEL
- if ( cache_ptr->aux_ptr != NULL ) {
-
- ((H5AC_aux_t *)(cache_ptr->aux_ptr))->dirty_bytes_threshold =
- config_ptr->dirty_bytes_threshold;
-
- ((H5AC_aux_t *)(cache_ptr->aux_ptr))->metadata_write_strategy =
- config_ptr->metadata_write_strategy;
- }
+ /* Set parallel configuration values */
+ /* (Which are only held in the H5AC layer -QAK) */
+ if(cache_ptr->aux_ptr != NULL) {
+ ((H5AC_aux_t *)(cache_ptr->aux_ptr))->dirty_bytes_threshold = config_ptr->dirty_bytes_threshold;
+ ((H5AC_aux_t *)(cache_ptr->aux_ptr))->metadata_write_strategy = config_ptr->metadata_write_strategy;
+ } /* end if */
#endif /* H5_HAVE_PARALLEL */
done:
-
#if H5AC__TRACE_FILE_ENABLED
/* For the set cache auto resize config call, only the contents
* of the config is necessary in the trace file. Write the return
* value to catch occult errors.
*/
- if ( ( cache_ptr != NULL ) &&
- ( H5C_get_trace_file_ptr(cache_ptr, &trace_file_ptr) >= 0 ) &&
- ( trace_file_ptr != NULL ) ) {
-
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr(cache_ptr)))
HDfprintf(trace_file_ptr,
"%s %d %d %d %d \"%s\" %d %d %d %f %d %d %ld %d %f %f %d %f %f %d %d %d %f %f %d %d %d %d %f %zu %d %d\n",
"H5AC_set_cache_auto_resize_config",
@@ -2185,11 +1934,9 @@ done:
trace_config.dirty_bytes_threshold,
trace_config.metadata_write_strategy,
(int)ret_value);
- }
#endif /* H5AC__TRACE_FILE_ENABLED */
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_set_cache_auto_resize_config() */
@@ -2216,27 +1963,18 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_validate_config(H5AC_cache_config_t * config_ptr)
+H5AC_validate_config(H5AC_cache_config_t *config_ptr)
{
H5C_auto_size_ctl_t internal_config;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
+ /* Check args */
if(config_ptr == NULL)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "NULL config_ptr on entry.")
-
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "NULL config_ptr on entry.")
if(config_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Unknown config version.")
-
- if((config_ptr->rpt_fcn_enabled != TRUE) && (config_ptr->rpt_fcn_enabled != FALSE))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "config_ptr->rpt_fcn_enabled must be either TRUE or FALSE.")
-
- if((config_ptr->open_trace_file != TRUE) && (config_ptr->open_trace_file != FALSE))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "config_ptr->open_trace_file must be either TRUE or FALSE.")
-
- if((config_ptr->close_trace_file != TRUE) && (config_ptr->close_trace_file != FALSE))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "config_ptr->close_trace_file must be either TRUE or FALSE.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Unknown config version.")
/* don't bother to test trace_file_name unless open_trace_file is TRUE */
if(config_ptr->open_trace_file) {
@@ -2247,45 +1985,32 @@ H5AC_validate_config(H5AC_cache_config_t * config_ptr)
* sanity checks on the length of the file name.
*/
name_len = HDstrlen(config_ptr->trace_file_name);
+ if(name_len == 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "config_ptr->trace_file_name is empty.")
+ else if(name_len > H5AC__MAX_TRACE_FILE_NAME_LEN)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "config_ptr->trace_file_name too long.")
+ } /* end if */
- if(name_len == 0) {
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "config_ptr->trace_file_name is empty.")
- } else if(name_len > H5AC__MAX_TRACE_FILE_NAME_LEN) {
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "config_ptr->trace_file_name too long.")
- }
- }
-
- if ( ( config_ptr->evictions_enabled != TRUE ) &&
- ( config_ptr->evictions_enabled != FALSE ) ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "config_ptr->evictions_enabled must be either TRUE or FALSE.")
- }
-
- if ( ( config_ptr->evictions_enabled == FALSE ) &&
- ( ( config_ptr->incr_mode != H5C_incr__off ) ||
- ( config_ptr->flash_incr_mode != H5C_flash_incr__off ) ||
- ( config_ptr->decr_mode != H5C_decr__off ) ) ) {
+ if((config_ptr->evictions_enabled == FALSE) &&
+ ((config_ptr->incr_mode != H5C_incr__off) ||
+ (config_ptr->flash_incr_mode != H5C_flash_incr__off) ||
+ (config_ptr->decr_mode != H5C_decr__off)))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Can't disable evictions while auto-resize is enabled.")
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "Can't disable evictions while auto-resize is enabled.")
- }
-
- if(config_ptr->dirty_bytes_threshold < H5AC__MIN_DIRTY_BYTES_THRESHOLD) {
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "dirty_bytes_threshold too small.")
- } else if(config_ptr->dirty_bytes_threshold > H5AC__MAX_DIRTY_BYTES_THRESHOLD) {
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "dirty_bytes_threshold too big.")
- }
+ if(config_ptr->dirty_bytes_threshold < H5AC__MIN_DIRTY_BYTES_THRESHOLD)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "dirty_bytes_threshold too small.")
+ else if(config_ptr->dirty_bytes_threshold > H5AC__MAX_DIRTY_BYTES_THRESHOLD)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "dirty_bytes_threshold too big.")
if((config_ptr->metadata_write_strategy != H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY) &&
(config_ptr->metadata_write_strategy != H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "config_ptr->metadata_write_strategy out of range.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "config_ptr->metadata_write_strategy out of range.")
- if(H5AC_ext_config_2_int_config(config_ptr, &internal_config) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_ext_config_2_int_config() failed.")
+ if(H5AC__ext_config_2_int_config(config_ptr, &internal_config) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC__ext_config_2_int_config() failed.")
if(H5C_validate_resize_config(&internal_config, H5C_RESIZE_CFG__VALIDATE_ALL) < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "error(s) in new config.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "error(s) in new config.")
done:
FUNC_LEAVE_NOAPI(ret_value)
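For orientation, a hedged caller-side sketch of the structure these checks guard. Only the fields exercised above are shown; the elided auto-resize fields (initial size, min/max sizes, ratios, epoch settings) must still be filled in with values acceptable to H5C_validate_resize_config(), so this fragment is illustrative rather than a complete configuration.

    /* Sketch: populate and validate an external cache configuration.
     * Fields not shown are assumed to be set elsewhere to values that
     * also pass H5C_validate_resize_config().
     */
    H5AC_cache_config_t config;

    HDmemset(&config, 0, sizeof(config));
    config.version                 = H5AC__CURR_CACHE_CONFIG_VERSION;
    config.rpt_fcn_enabled         = FALSE;
    config.open_trace_file         = FALSE;
    config.close_trace_file        = FALSE;
    config.evictions_enabled       = TRUE;
    config.incr_mode               = H5C_incr__off;
    config.flash_incr_mode         = H5C_flash_incr__off;
    config.decr_mode               = H5C_decr__off;
    config.dirty_bytes_threshold   = H5AC__MIN_DIRTY_BYTES_THRESHOLD;
    config.metadata_write_strategy = H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY;
    /* ... size / ratio fields elided ... */

    if(H5AC_validate_config(&config) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid cache configuration")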
@@ -2309,44 +2034,29 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_close_trace_file(H5AC_t * cache_ptr)
-
+H5AC_close_trace_file(H5AC_t *cache_ptr)
{
- herr_t ret_value = SUCCEED; /* Return value */
FILE * trace_file_ptr = NULL;
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- if ( cache_ptr == NULL ) {
+ if(cache_ptr == NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "NULL cache_ptr on entry.")
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "NULL cache_ptr on entry.")
- }
-
- if ( H5C_get_trace_file_ptr(cache_ptr, &trace_file_ptr) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_get_trace_file_ptr() failed.")
- }
+ if(NULL == (trace_file_ptr = H5C_get_trace_file_ptr(cache_ptr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_trace_file_ptr() failed.")
- if ( trace_file_ptr != NULL ) {
-
- if ( H5C_set_trace_file_ptr(cache_ptr, NULL) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_set_trace_file_ptr() failed.")
- }
-
- if ( HDfclose(trace_file_ptr) != 0 ) {
+ if(trace_file_ptr != NULL) {
+ if(H5C_set_trace_file_ptr(cache_ptr, NULL) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_set_trace_file_ptr() failed.")
- HGOTO_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, FAIL, \
- "can't close metadata cache trace file")
- }
- }
+ if(HDfclose(trace_file_ptr) != 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, FAIL, "can't close metadata cache trace file")
+ } /* end if */
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_close_trace_file() */
@@ -2367,99 +2077,57 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_open_trace_file(H5AC_t * cache_ptr,
- const char * trace_file_name)
+H5AC_open_trace_file(H5AC_t *cache_ptr, const char *trace_file_name)
{
- herr_t ret_value = SUCCEED; /* Return value */
char file_name[H5AC__MAX_TRACE_FILE_NAME_LEN + H5C__PREFIX_LEN + 2];
FILE * file_ptr = NULL;
#ifdef H5_HAVE_PARALLEL
H5AC_aux_t * aux_ptr = NULL;
#endif /* H5_HAVE_PARALLEL */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
HDassert(cache_ptr);
- if ( cache_ptr == NULL ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "cache_ptr NULL on entry.")
- }
-
- if ( trace_file_name == NULL ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "NULL trace_file_name on entry.")
- }
-
- if ( HDstrlen(trace_file_name) > H5AC__MAX_TRACE_FILE_NAME_LEN ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "trace file name too long.")
- }
-
- if ( H5C_get_trace_file_ptr(cache_ptr, &file_ptr) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_get_trace_file_ptr() failed.")
- }
-
- if ( file_ptr != NULL ) {
-
- HGOTO_ERROR(H5E_FILE, H5E_FILEOPEN, FAIL, "trace file already open.")
- }
+ /* Check args */
+ if(cache_ptr == NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "cache_ptr NULL on entry.")
+ if(trace_file_name == NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "NULL trace_file_name on entry.")
+ if(HDstrlen(trace_file_name) > H5AC__MAX_TRACE_FILE_NAME_LEN)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "trace file name too long.")
+ if(NULL != (file_ptr = H5C_get_trace_file_ptr(cache_ptr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_FILEOPEN, FAIL, "trace file already open.")
#ifdef H5_HAVE_PARALLEL
-
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
- if ( cache_ptr->aux_ptr == NULL ) {
-
+ if(cache_ptr->aux_ptr == NULL)
sprintf(file_name, "%s", trace_file_name);
-
- } else {
-
- if ( aux_ptr->magic != H5AC__H5AC_AUX_T_MAGIC ) {
-
+ else {
+ if(aux_ptr->magic != H5AC__H5AC_AUX_T_MAGIC)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad aux_ptr->magic.")
- }
sprintf(file_name, "%s.%d", trace_file_name, aux_ptr->mpi_rank);
+ } /* end else */
- }
-
- if ( HDstrlen(file_name) >
- H5AC__MAX_TRACE_FILE_NAME_LEN + H5C__PREFIX_LEN + 1 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "cooked trace file name too long.")
- }
-
+ if(HDstrlen(file_name) > H5AC__MAX_TRACE_FILE_NAME_LEN + H5C__PREFIX_LEN + 1)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "cooked trace file name too long.")
#else /* H5_HAVE_PARALLEL */
-
- HDsnprintf(file_name,
- (size_t)(H5AC__MAX_TRACE_FILE_NAME_LEN + H5C__PREFIX_LEN + 1),
+ HDsnprintf(file_name, (size_t)(H5AC__MAX_TRACE_FILE_NAME_LEN + H5C__PREFIX_LEN + 1),
"%s", trace_file_name);
-
#endif /* H5_HAVE_PARALLEL */
- if ( (file_ptr = HDfopen(file_name, "w")) == NULL ) {
-
- /* trace file open failed */
+ if((file_ptr = HDfopen(file_name, "w")) == NULL)
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "trace file open failed.")
- }
HDfprintf(file_ptr, "### HDF5 metadata cache trace file version 1 ###\n");
- if ( H5C_set_trace_file_ptr(cache_ptr, file_ptr) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_set_trace_file_ptr() failed.")
- }
+ if(H5C_set_trace_file_ptr(cache_ptr, file_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_set_trace_file_ptr() failed.")
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_open_trace_file() */
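A hedged usage sketch of the two trace-file entry points above; the cache pointer and file name below are placeholders, and in a parallel build H5AC_open_trace_file() appends the MPI rank to the name as shown above.

    /* Sketch: bracket a region of interest with metadata-cache tracing.
     * 'cache_ptr' is assumed to be the open file's metadata cache.
     */
    if(H5AC_open_trace_file(cache_ptr, "md_trace.txt") < 0)
        HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "can't open metadata cache trace file")

    /* ... metadata operations to be traced ... */

    if(H5AC_close_trace_file(cache_ptr) < 0)
        HGOTO_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, FAIL, "can't close metadata cache trace file")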
@@ -2483,8 +2151,7 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
herr_t
-H5AC_add_candidate(H5AC_t * cache_ptr,
- haddr_t addr)
+H5AC_add_candidate(H5AC_t * cache_ptr, haddr_t addr)
{
H5AC_aux_t * aux_ptr;
H5AC_slist_entry_t * slist_entry_ptr = NULL;
@@ -2492,36 +2159,31 @@ H5AC_add_candidate(H5AC_t * cache_ptr,
FUNC_ENTER_NOAPI(FAIL)
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
-
+ /* Sanity checks */
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
+ HDassert(aux_ptr->candidate_slist_ptr != NULL);
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
- HDassert( aux_ptr->candidate_slist_ptr != NULL );
-
- /* If the supplied address appears in the candidate list, scream and die. */
- if(NULL != H5SL_search(aux_ptr->candidate_slist_ptr, (void *)(&addr)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry already in candidate slist.")
-
- /* otherwise, construct an entry for the supplied address, and insert
+ /* Construct an entry for the supplied address, and insert
* it into the candidate slist.
*/
- if(NULL == (slist_entry_ptr = H5FL_CALLOC(H5AC_slist_entry_t)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "Can't allocate candidate slist entry .")
-
- slist_entry_ptr->magic = H5AC__H5AC_SLIST_ENTRY_T_MAGIC;
+ if(NULL == (slist_entry_ptr = H5FL_MALLOC(H5AC_slist_entry_t)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "Can't allocate candidate slist entry")
slist_entry_ptr->addr = addr;
if(H5SL_insert(aux_ptr->candidate_slist_ptr, slist_entry_ptr, &(slist_entry_ptr->addr)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "can't insert entry into dirty entry slist.")
-
- aux_ptr->candidate_slist_len += 1;
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "can't insert entry into dirty entry slist")
done:
+ /* Clean up on error */
+ if(ret_value < 0)
+ if(slist_entry_ptr)
+ slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_add_candidate() */
#endif /* H5_HAVE_PARALLEL */
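A hedged usage sketch of the function above: in a parallel (H5_HAVE_PARALLEL) build, a process-0 caller pushes a batch of dirty-entry addresses onto the candidate slist ahead of a sync point. The helper name and the address array are hypothetical; only H5AC_add_candidate() and its (cache_ptr, addr) signature come from the code above.

/* Hypothetical caller: queue a batch of candidate addresses.
 * 'candidate_addrs' and 'n_candidates' are illustrative names only.
 */
static herr_t
queue_candidates(H5AC_t *cache_ptr, const haddr_t *candidate_addrs,
    unsigned n_candidates)
{
    unsigned u;

    for(u = 0; u < n_candidates; u++)
        if(H5AC_add_candidate(cache_ptr, candidate_addrs[u]) < 0)
            return FAIL;        /* propagate the failure to the caller */

    return SUCCEED;
} /* queue_candidates() */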
@@ -2568,33 +2230,22 @@ done:
*
* Programmer: John Mainzer, 5/30/14
*
- * Changes:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
#ifndef NDEBUG
herr_t
-H5AC_get_entry_ptr_from_addr(const H5F_t *f,
- haddr_t addr,
- void ** entry_ptr_ptr)
+H5AC_get_entry_ptr_from_addr(const H5F_t *f, haddr_t addr, void **entry_ptr_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- if ( H5C_get_entry_ptr_from_addr(f, addr, entry_ptr_ptr) < 0 )
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_get_entry_ptr_from_addr() failed.")
+ if(H5C_get_entry_ptr_from_addr(f, addr, entry_ptr_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_entry_ptr_from_addr() failed")
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_get_entry_ptr_from_addr() */
-
#endif /* NDEBUG */
@@ -2624,36 +2275,23 @@ done:
*
* Programmer: John Mainzer, 5/30/14
*
- * Changes:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
#ifndef NDEBUG
herr_t
-H5AC_verify_entry_type(const H5F_t *f,
- haddr_t addr,
- const H5AC_class_t * expected_type,
- hbool_t * in_cache_ptr,
- hbool_t * type_ok_ptr)
+H5AC_verify_entry_type(const H5F_t *f, haddr_t addr, const H5AC_class_t *expected_type,
+ hbool_t *in_cache_ptr, hbool_t *type_ok_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- if ( H5C_verify_entry_type(f, addr, (const H5C_class_t *)expected_type,
- in_cache_ptr, type_ok_ptr) < 0 )
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_verify_entry_type() failed.")
+ if(H5C_verify_entry_type(f, addr, expected_type, in_cache_ptr, type_ok_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_verify_entry_type() failed")
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_verify_entry_type() */
-
#endif /* NDEBUG */
@@ -2664,7 +2302,7 @@ done:
/*-------------------------------------------------------------------------
*
- * Function: H5AC_broadcast_candidate_list()
+ * Function: H5AC__broadcast_candidate_list()
*
* Purpose: Broadcast the contents of the process 0 candidate entry
* slist. In passing, also remove all entries from said
@@ -2688,98 +2326,125 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_broadcast_candidate_list(H5AC_t * cache_ptr,
- int * num_entries_ptr,
- haddr_t ** haddr_buf_ptr_ptr)
+H5AC__broadcast_candidate_list(H5AC_t *cache_ptr, int *num_entries_ptr,
+ haddr_t **haddr_buf_ptr_ptr)
{
- hbool_t success = FALSE;
H5AC_aux_t * aux_ptr = NULL;
haddr_t * haddr_buf_ptr = NULL;
- MPI_Offset * MPI_Offset_buf_ptr = NULL;
- size_t buf_size = 0;
int mpi_result;
- int chk_num_entries = 0;
- int num_entries = 0;
+ int num_entries;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->mpi_rank == 0 );
- HDassert( aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
- HDassert( aux_ptr->candidate_slist_ptr != NULL );
- HDassert( H5SL_count(aux_ptr->candidate_slist_ptr) ==
- (size_t)(aux_ptr->candidate_slist_len) );
- HDassert( num_entries_ptr != NULL );
- HDassert( *num_entries_ptr == 0 );
- HDassert( haddr_buf_ptr_ptr != NULL );
- HDassert( *haddr_buf_ptr_ptr == NULL );
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->mpi_rank == 0);
+ HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
+ HDassert(aux_ptr->candidate_slist_ptr != NULL);
+ HDassert(num_entries_ptr != NULL);
+ HDassert(*num_entries_ptr == 0);
+ HDassert(haddr_buf_ptr_ptr != NULL);
+ HDassert(*haddr_buf_ptr_ptr == NULL);
/* First broadcast the number of entries in the list so that the
* receivers can set up buffers to receive them. If there aren't
* any, we are done.
*/
- num_entries = aux_ptr->candidate_slist_len;
+ num_entries = (int)H5SL_count(aux_ptr->candidate_slist_ptr);
if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 1", mpi_result)
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
if(num_entries > 0) {
+ size_t buf_size = 0;
+ int chk_num_entries = 0;
+
/* convert the candidate list into the format we
* are used to receiving from process 0, and also load it
* into a buffer for transmission.
*/
- if(H5AC_copy_candidate_list_to_buffer(cache_ptr, &chk_num_entries,
- &haddr_buf_ptr, &buf_size, &MPI_Offset_buf_ptr) < 0)
+ if(H5AC__copy_candidate_list_to_buffer(cache_ptr, &chk_num_entries, &haddr_buf_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't construct candidate buffer.")
+ HDassert(chk_num_entries == num_entries);
+ HDassert(haddr_buf_ptr != NULL);
- HDassert( chk_num_entries == num_entries );
- HDassert( haddr_buf_ptr != NULL );
- HDassert( MPI_Offset_buf_ptr != NULL );
- HDassert( aux_ptr->candidate_slist_len == 0 );
-
- /* Now broadcast the list of candidate entries -- if there is one.
- *
- * The peculiar structure of the following call to MPI_Bcast is
- * due to MPI's (?) failure to believe in the MPI_Offset type.
- * Thus the element type is MPI_BYTE, with size equal to the
- * buf_size computed above.
- */
- if(MPI_SUCCESS != (mpi_result = MPI_Bcast((void *)MPI_Offset_buf_ptr, (int)buf_size, MPI_BYTE, 0, aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 2", mpi_result)
+ /* Now broadcast the list of candidate entries */
+ buf_size = sizeof(haddr_t) * (size_t)num_entries;
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast((void *)haddr_buf_ptr, (int)buf_size, MPI_BYTE, 0, aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
} /* end if */
- success = TRUE;
+ /* Pass the number of entries and the buffer pointer
+ * back to the caller. Do this so that we can use the same code
+ * to apply the candidate list to all the processes.
+ */
+ *num_entries_ptr = num_entries;
+ *haddr_buf_ptr_ptr = haddr_buf_ptr;
done:
- if(MPI_Offset_buf_ptr != NULL)
- MPI_Offset_buf_ptr = (MPI_Offset *)H5MM_xfree((void *)MPI_Offset_buf_ptr);
-
- if(success) {
- /* Pass the number of entries and the buffer pointer
- * back to the caller. Do this so that we can use the same code
- * to apply the candidate list to all the processes.
- */
- *num_entries_ptr = num_entries;
- *haddr_buf_ptr_ptr = haddr_buf_ptr;
- } else if(haddr_buf_ptr != NULL) {
- haddr_buf_ptr = (haddr_t *)H5MM_xfree((void *)haddr_buf_ptr);
- }
+ if(ret_value < 0)
+ if(haddr_buf_ptr)
+ haddr_buf_ptr = (haddr_t *)H5MM_xfree((void *)haddr_buf_ptr);
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_broadcast_candidate_list() */
+} /* H5AC__broadcast_candidate_list() */
#endif /* H5_HAVE_PARALLEL */
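The broadcast above is one half of a two-step protocol: a single MPI_Bcast of the entry count, followed by one MPI_Bcast of the haddr_t array sent as raw bytes. A hedged sketch of what the matching receive on the other ranks has to do; the real receive helper lives elsewhere in this file, and the name and error handling below are illustrative only.

/* Illustrative receive side of the candidate-list broadcast: mirror the
 * two MPI_Bcast calls made by process 0 above.
 */
static herr_t
sketch_receive_candidate_list(const H5AC_aux_t *aux_ptr, int *num_entries_ptr,
    haddr_t **haddr_buf_ptr_ptr)
{
    haddr_t *haddr_buf_ptr = NULL;
    int num_entries;

    /* Step 1: receive the number of candidate entries */
    if(MPI_SUCCESS != MPI_Bcast(&num_entries, 1, MPI_INT, 0, aux_ptr->mpi_comm))
        return FAIL;

    if(num_entries > 0) {
        size_t buf_size = sizeof(haddr_t) * (size_t)num_entries;

        /* Step 2: receive the addresses themselves, as raw bytes */
        if(NULL == (haddr_buf_ptr = (haddr_t *)H5MM_malloc(buf_size)))
            return FAIL;
        if(MPI_SUCCESS != MPI_Bcast((void *)haddr_buf_ptr, (int)buf_size, MPI_BYTE, 0, aux_ptr->mpi_comm)) {
            haddr_buf_ptr = (haddr_t *)H5MM_xfree((void *)haddr_buf_ptr);
            return FAIL;
        } /* end if */
    } /* end if */

    *num_entries_ptr = num_entries;
    *haddr_buf_ptr_ptr = haddr_buf_ptr;

    return SUCCEED;
} /* sketch_receive_candidate_list() */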
/*-------------------------------------------------------------------------
*
- * Function: H5AC_broadcast_clean_list()
+ * Function: H5AC__broadcast_clean_list_cb()
+ *
+ * Purpose: Skip list callback for building array of addresses for
+ * broadcasting the clean list.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: Quincey Koziol, 6/12/15
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5_HAVE_PARALLEL
+static herr_t
+H5AC__broadcast_clean_list_cb(void *_item, void H5_ATTR_UNUSED *_key,
+ void *_udata)
+{
+ H5AC_slist_entry_t * slist_entry_ptr = (H5AC_slist_entry_t *)_item; /* Address of item */
+ H5AC_addr_list_ud_t * udata = (H5AC_addr_list_ud_t *)_udata; /* Context for callback */
+ haddr_t addr;
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(slist_entry_ptr);
+ HDassert(udata);
+
+ /* Store the entry's address in the buffer */
+ addr = slist_entry_ptr->addr;
+ udata->addr_buf_ptr[udata->i] = addr;
+ udata->i++;
+
+ /* now release the entry */
+ slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
+
+ /* and also remove the matching entry from the dirtied list
+ * if it exists.
+ */
+ if(NULL != (slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(udata->aux_ptr->d_slist_ptr, (void *)(&addr))))
+ slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5AC__broadcast_clean_list_cb() */
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC__broadcast_clean_list()
*
* Purpose: Broadcast the contents of the process 0 cleaned entry
* slist. In passing, also remove all entries from said
@@ -2799,174 +2464,76 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_broadcast_clean_list(H5AC_t * cache_ptr)
+H5AC__broadcast_clean_list(H5AC_t * cache_ptr)
{
- herr_t ret_value = SUCCEED; /* Return value */
- haddr_t addr;
haddr_t * addr_buf_ptr = NULL;
- H5AC_aux_t * aux_ptr = NULL;
- H5SL_node_t * slist_node_ptr = NULL;
- H5AC_slist_entry_t * slist_entry_ptr = NULL;
- MPI_Offset * buf_ptr = NULL;
- size_t buf_size;
- int i = 0;
+ H5AC_aux_t * aux_ptr;
int mpi_result;
int num_entries = 0;
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr;
-
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->mpi_rank == 0 );
- HDassert( aux_ptr->c_slist_ptr != NULL );
- HDassert( H5SL_count(aux_ptr->c_slist_ptr) ==
- (size_t)(aux_ptr->c_slist_len) );
-
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->mpi_rank == 0);
+ HDassert(aux_ptr->c_slist_ptr != NULL);
/* First broadcast the number of entries in the list so that the
      * receivers can set up a buffer to receive them.  If there aren't
* any, we are done.
*/
- num_entries = aux_ptr->c_slist_len;
-
- mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, aux_ptr->mpi_comm);
-
- if ( mpi_result != MPI_SUCCESS ) {
-
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 1", mpi_result)
+ num_entries = (int)H5SL_count(aux_ptr->c_slist_ptr);
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
- }
+ if(num_entries > 0) {
+ H5AC_addr_list_ud_t udata;
+ size_t buf_size;
- if ( num_entries > 0 )
- {
/* allocate a buffer to store the list of entry base addresses in */
+ buf_size = sizeof(haddr_t) * (size_t)num_entries;
+ if(NULL == (addr_buf_ptr = (haddr_t *)H5MM_malloc(buf_size)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for addr buffer")
+
+ /* Set up user data for callback */
+ udata.aux_ptr = aux_ptr;
+ udata.addr_buf_ptr = addr_buf_ptr;
+ udata.i = 0;
+
+ /* Free all the clean list entries, building the address list in the callback */
+ /* (Callback also removes the matching entries from the dirtied list) */
+ if(H5SL_free(aux_ptr->c_slist_ptr, H5AC__broadcast_clean_list_cb, &udata) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "Can't build address list for clean entries")
+
+ /* Now broadcast the list of cleaned entries */
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast((void *)addr_buf_ptr, (int)buf_size, MPI_BYTE, 0, aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
+ } /* end if */
- buf_size = sizeof(MPI_Offset) * (size_t)num_entries;
-
- buf_ptr = (MPI_Offset *)H5MM_malloc(buf_size);
-
- if ( buf_ptr == NULL ) {
-
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
- "memory allocation failed for clean entry buffer")
- }
-
- /* if the sync_point_done callback is defined, allocate the
- * addr buffer as well.
- */
- if ( aux_ptr->sync_point_done != NULL ) {
-
- addr_buf_ptr = (haddr_t *)H5MM_malloc((size_t)num_entries * sizeof(haddr_t));
-
- if ( addr_buf_ptr == NULL ) {
-
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
- "memory allocation failed for addr buffer")
- }
- }
-
-
- /* now load the entry base addresses into the buffer, emptying the
- * cleaned entry list in passing
- */
-
- while ( NULL != (slist_node_ptr = H5SL_first(aux_ptr->c_slist_ptr) ) )
- {
- slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_item(slist_node_ptr);
-
- HDassert(slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
-
- HDassert( i < num_entries );
-
- addr = slist_entry_ptr->addr;
-
- if ( addr_buf_ptr != NULL ) {
-
- addr_buf_ptr[i] = addr;
- }
-
- if ( H5FD_mpi_haddr_to_MPIOff(addr, &(buf_ptr[i])) < 0 ) {
-
- HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, \
- "can't convert from haddr to MPI off")
- }
-
- i++;
-
- /* now remove the entry from the cleaned entry list */
- if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
- != slist_entry_ptr ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
- "Can't delete entry from cleaned entry slist.")
- }
-
- slist_entry_ptr->magic = 0;
- slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
-
- aux_ptr->c_slist_len -= 1;
-
- HDassert( aux_ptr->c_slist_len >= 0 );
-
- /* and also remove the matching entry from the dirtied list
- * if it exists.
- */
- if((slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_search(aux_ptr->d_slist_ptr, (void *)(&addr))) != NULL) {
- HDassert( slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC );
- HDassert( slist_entry_ptr->addr == addr );
-
- if(H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr)) != slist_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, "Can't delete entry from dirty entry slist.")
-
- slist_entry_ptr->magic = 0;
- slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
-
- aux_ptr->d_slist_len -= 1;
-
- HDassert( aux_ptr->d_slist_len >= 0 );
- } /* end if */
- } /* while */
-
-
- /* Now broadcast the list of cleaned entries -- if there is one.
- *
- * The peculiar structure of the following call to MPI_Bcast is
- * due to MPI's (?) failure to believe in the MPI_Offset type.
- * Thus the element type is MPI_BYTE, with size equal to the
- * buf_size computed above.
- */
-
- mpi_result = MPI_Bcast((void *)buf_ptr, (int)buf_size, MPI_BYTE, 0,
- aux_ptr->mpi_comm);
-
- if ( mpi_result != MPI_SUCCESS ) {
-
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 2", mpi_result)
- }
- }
-
- if(aux_ptr->sync_point_done != NULL)
+ /* if it is defined, call the sync point done callback. Note
+ * that this callback is defined purely for testing purposes,
+ * and should be undefined under normal operating circumstances.
+ */
+ if(aux_ptr->sync_point_done)
(aux_ptr->sync_point_done)(num_entries, addr_buf_ptr);
done:
- if(buf_ptr != NULL)
- buf_ptr = (MPI_Offset *)H5MM_xfree((void *)buf_ptr);
- if(addr_buf_ptr != NULL)
+ if(addr_buf_ptr)
addr_buf_ptr = (haddr_t *)H5MM_xfree((void *)addr_buf_ptr);
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_broadcast_clean_list() */
+} /* H5AC__broadcast_clean_list() */
#endif /* H5_HAVE_PARALLEL */
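The sync_point_done pointer exercised at the end of the function above is a test-only hook, as the comment notes. A hedged sketch of the kind of callback a test might install; the function name and body are hypothetical, and only the (entry count, address array) calling convention comes from the call above.

/* Hypothetical test hook matching the call above: record which entries
 * process 0 reported clean at this sync point.
 */
static void
sketch_sync_point_done(int num_entries, haddr_t *addr_tbl)
{
    int i;

    for(i = 0; i < num_entries; i++)
        fprintf(stderr, "sync point: entry at address %llu reported clean\n",
                (unsigned long long)addr_tbl[i]);
} /* sketch_sync_point_done() */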
/*-------------------------------------------------------------------------
*
- * Function: H5AC_check_if_write_permitted
+ * Function: H5AC__check_if_write_permitted
*
* Purpose: Determine if a write is permitted under the current
* circumstances, and set *write_permitted_ptr accordingly.
@@ -2983,62 +2550,44 @@ done:
*
*-------------------------------------------------------------------------
*/
-#ifdef H5_HAVE_PARALLEL
-static herr_t
-H5AC_check_if_write_permitted(const H5F_t *f,
- hid_t H5_ATTR_UNUSED dxpl_id,
- hbool_t * write_permitted_ptr)
-#else /* H5_HAVE_PARALLEL */
static herr_t
-H5AC_check_if_write_permitted(const H5F_t H5_ATTR_UNUSED * f,
- hid_t H5_ATTR_UNUSED dxpl_id,
- hbool_t * write_permitted_ptr)
+H5AC__check_if_write_permitted(const H5F_t
+#ifndef H5_HAVE_PARALLEL
+H5_ATTR_UNUSED
#endif /* H5_HAVE_PARALLEL */
+ *f, hbool_t *write_permitted_ptr)
{
- hbool_t write_permitted = TRUE;
- herr_t ret_value = SUCCEED; /* Return value */
#ifdef H5_HAVE_PARALLEL
H5AC_aux_t * aux_ptr = NULL;
#endif /* H5_HAVE_PARALLEL */
+ hbool_t write_permitted = TRUE;
-
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_STATIC_NOERR
#ifdef H5_HAVE_PARALLEL
- HDassert( f != NULL );
- HDassert( f->shared != NULL );
- HDassert( f->shared->cache != NULL );
-
+ /* Sanity checks */
+ HDassert(f != NULL);
+ HDassert(f->shared != NULL);
+ HDassert(f->shared->cache != NULL);
aux_ptr = (H5AC_aux_t *)(f->shared->cache->aux_ptr);
+ if(aux_ptr != NULL) {
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
- if ( aux_ptr != NULL ) {
-
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
-
- if ( ( aux_ptr->mpi_rank == 0 ) ||
- ( aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED ) ) {
-
+ if((aux_ptr->mpi_rank == 0) || (aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED))
write_permitted = aux_ptr->write_permitted;
-
- } else {
-
+ else
write_permitted = FALSE;
- }
- }
+ } /* end if */
#endif /* H5_HAVE_PARALLEL */
*write_permitted_ptr = write_permitted;
-done:
-
- FUNC_LEAVE_NOAPI(ret_value)
-
-} /* H5AC_check_if_write_permitted() */
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5AC__check_if_write_permitted() */
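Condensed, the parallel branch above reduces to the predicate below. This is a restatement for clarity rather than additional code in H5AC.c, and the helper name is hypothetical.

/* Restatement of the rule above: with no parallel aux info writes are
 * always permitted; otherwise rank 0, or any rank under the distributed
 * write strategy, defers to aux_ptr->write_permitted, and all remaining
 * ranks may not write.
 */
static hbool_t
sketch_write_permitted(const H5AC_aux_t *aux_ptr)
{
    if(aux_ptr == NULL)
        return TRUE;

    if(aux_ptr->mpi_rank == 0 ||
            aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED)
        return aux_ptr->write_permitted;

    return FALSE;
} /* sketch_write_permitted() */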
/*-------------------------------------------------------------------------
- * Function: H5AC_construct_candidate_list()
+ * Function: H5AC__construct_candidate_list()
*
* Purpose: In the parallel case when the metadata_write_strategy is
* H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, process 0 uses
@@ -3058,29 +2607,26 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_construct_candidate_list(H5AC_t * cache_ptr,
- H5AC_aux_t * aux_ptr,
- int sync_point_op)
+H5AC__construct_candidate_list(H5AC_t *cache_ptr, H5AC_aux_t *aux_ptr,
+ int sync_point_op)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_STATIC
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
- HDassert( ( sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_CACHE ) ||
- ( aux_ptr->mpi_rank == 0 ) );
- HDassert( aux_ptr->d_slist_ptr != NULL );
- HDassert( aux_ptr->c_slist_ptr != NULL );
- HDassert( aux_ptr->c_slist_len == 0 );
- HDassert( aux_ptr->candidate_slist_ptr != NULL );
- HDassert( aux_ptr->candidate_slist_len == 0 );
- HDassert( ( sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN ) ||
- ( sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_CACHE ) );
+ /* Sanity checks */
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
+ HDassert((sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_CACHE) || (aux_ptr->mpi_rank == 0));
+ HDassert(aux_ptr->d_slist_ptr != NULL);
+ HDassert(aux_ptr->c_slist_ptr != NULL);
+ HDassert(H5SL_count(aux_ptr->c_slist_ptr) == 0);
+ HDassert(aux_ptr->candidate_slist_ptr != NULL);
+ HDassert(H5SL_count(aux_ptr->candidate_slist_ptr) == 0);
+ HDassert((sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) || (sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_CACHE));
switch(sync_point_op) {
case H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN:
@@ -3100,13 +2646,52 @@ H5AC_construct_candidate_list(H5AC_t * cache_ptr,
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_construct_candidate_list() */
+} /* H5AC__construct_candidate_list() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
*
- * Function: H5AC_copy_candidate_list_to_buffer
+ * Function: H5AC__copy_candidate_list_to_buffer_cb
+ *
+ * Purpose: Skip list callback for building array of addresses for
+ * broadcasting the candidate list.
+ *
+ * Return: Return SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: Quincey Koziol, 6/12/15
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5_HAVE_PARALLEL
+static herr_t
+H5AC__copy_candidate_list_to_buffer_cb(void *_item, void H5_ATTR_UNUSED *_key,
+ void *_udata)
+{
+ H5AC_slist_entry_t * slist_entry_ptr = (H5AC_slist_entry_t *)_item; /* Address of item */
+ H5AC_addr_list_ud_t * udata = (H5AC_addr_list_ud_t *)_udata; /* Context for callback */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(slist_entry_ptr);
+ HDassert(udata);
+
+ /* Store the entry's address in the buffer */
+ udata->addr_buf_ptr[udata->i] = slist_entry_ptr->addr;
+ udata->i++;
+
+ /* now release the entry */
+ slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5AC__copy_candidate_list_to_buffer_cb() */
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC__copy_candidate_list_to_buffer
*
 * Purpose:     Allocate a buffer and copy the contents of the candidate
 *              entry slist into it.  In passing, remove all
@@ -3137,123 +2722,68 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_copy_candidate_list_to_buffer(H5AC_t * cache_ptr,
- int * num_entries_ptr,
- haddr_t ** haddr_buf_ptr_ptr,
- size_t * MPI_Offset_buf_size_ptr,
- MPI_Offset ** MPI_Offset_buf_ptr_ptr)
+H5AC__copy_candidate_list_to_buffer(const H5AC_t *cache_ptr, int *num_entries_ptr,
+ haddr_t **haddr_buf_ptr_ptr)
{
- herr_t ret_value = SUCCEED; /* Return value */
- hbool_t success = FALSE;
- haddr_t addr;
H5AC_aux_t * aux_ptr = NULL;
- H5SL_node_t * slist_node_ptr = NULL;
- H5AC_slist_entry_t * slist_entry_ptr = NULL;
- MPI_Offset * MPI_Offset_buf_ptr = NULL;
+ H5AC_addr_list_ud_t udata;
haddr_t * haddr_buf_ptr = NULL;
size_t buf_size;
- int i = 0;
int num_entries = 0;
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
+ HDassert(aux_ptr->candidate_slist_ptr != NULL);
+ HDassert(H5SL_count(aux_ptr->candidate_slist_ptr) > 0);
+ HDassert(num_entries_ptr != NULL);
+ HDassert(*num_entries_ptr == 0);
+ HDassert(haddr_buf_ptr_ptr != NULL);
+ HDassert(*haddr_buf_ptr_ptr == NULL);
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
- HDassert( aux_ptr->candidate_slist_ptr != NULL );
- HDassert( H5SL_count(aux_ptr->candidate_slist_ptr) ==
- (size_t)(aux_ptr->candidate_slist_len) );
- HDassert( aux_ptr->candidate_slist_len > 0 );
- HDassert( num_entries_ptr != NULL );
- HDassert( *num_entries_ptr == 0 );
- HDassert( haddr_buf_ptr_ptr != NULL );
- HDassert( *haddr_buf_ptr_ptr == NULL );
-
- num_entries = aux_ptr->candidate_slist_len;
+ num_entries = (int)H5SL_count(aux_ptr->candidate_slist_ptr);
     /* allocate a buffer to store the list of candidate entry
* base addresses in
*/
- if(MPI_Offset_buf_ptr_ptr != NULL) {
- HDassert( MPI_Offset_buf_size_ptr != NULL );
+ buf_size = sizeof(haddr_t) * (size_t)num_entries;
+ if(NULL == (haddr_buf_ptr = (haddr_t *)H5MM_malloc(buf_size)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for haddr buffer")
- /* allocate a buffer of MPI_Offset */
- buf_size = sizeof(MPI_Offset) * (size_t)num_entries;
- if(NULL == (MPI_Offset_buf_ptr = (MPI_Offset *)H5MM_malloc(buf_size)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for MPI_Offset buffer")
- } /* end if */
+ /* Set up user data for callback */
+ udata.aux_ptr = aux_ptr;
+ udata.addr_buf_ptr = haddr_buf_ptr;
+ udata.i = 0;
- /* allocate a buffer of haddr_t */
- if(NULL == (haddr_buf_ptr = (haddr_t *)H5MM_malloc(sizeof(haddr_t) * (size_t)num_entries)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for haddr buffer")
+ /* Free all the candidate list entries, building the address list in the callback */
+ if(H5SL_free(aux_ptr->candidate_slist_ptr, H5AC__copy_candidate_list_to_buffer_cb, &udata) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "Can't build address list for candidate entries")
- /* now load the entry base addresses into the buffer, emptying the
- * candidate entry list in passing
+ /* Pass the number of entries and the buffer pointer
+ * back to the caller.
*/
- while(NULL != (slist_node_ptr = H5SL_first(aux_ptr->candidate_slist_ptr))) {
- slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_item(slist_node_ptr);
-
- HDassert(slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
- HDassert( i < num_entries );
-
- addr = slist_entry_ptr->addr;
- haddr_buf_ptr[i] = addr;
- if(MPI_Offset_buf_ptr != NULL) {
- if(H5FD_mpi_haddr_to_MPIOff(addr, &(MPI_Offset_buf_ptr[i])) < 0)
- HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't convert from haddr to MPI off")
- } /* end if */
-
- i++;
-
- /* now remove the entry from the cleaned entry list */
- if(H5SL_remove(aux_ptr->candidate_slist_ptr, (void *)(&addr)) != slist_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, "Can't delete entry from candidate entry slist.")
-
- slist_entry_ptr->magic = 0;
- slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
-
- aux_ptr->candidate_slist_len -= 1;
-
- HDassert( aux_ptr->candidate_slist_len >= 0 );
- } /* while */
- HDassert( aux_ptr->candidate_slist_len == 0 );
-
- success = TRUE;
+ *num_entries_ptr = num_entries;
+ *haddr_buf_ptr_ptr = haddr_buf_ptr;
done:
- if(success) {
- /* Pass the number of entries and the buffer pointer
- * back to the caller.
- */
- *num_entries_ptr = num_entries;
- *haddr_buf_ptr_ptr = haddr_buf_ptr;
-
- if(MPI_Offset_buf_ptr_ptr != NULL) {
- HDassert( MPI_Offset_buf_ptr != NULL);
- *MPI_Offset_buf_size_ptr = buf_size;
- *MPI_Offset_buf_ptr_ptr = MPI_Offset_buf_ptr;
- } /* end if */
- } /* end if */
- else {
- if(MPI_Offset_buf_ptr != NULL)
- MPI_Offset_buf_ptr = (MPI_Offset *)H5MM_xfree((void *)MPI_Offset_buf_ptr);
- if(haddr_buf_ptr != NULL)
+ if(ret_value < 0)
+ if(haddr_buf_ptr)
haddr_buf_ptr = (haddr_t *)H5MM_xfree((void *)haddr_buf_ptr);
- } /* end else */
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_copy_candidate_list_to_buffer() */
+} /* H5AC__copy_candidate_list_to_buffer() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
- * Function: H5AC_ext_config_2_int_config()
+ * Function: H5AC__ext_config_2_int_config()
*
* Purpose: Utility function to translate an instance of
* H5AC_cache_config_t to an instance of H5C_auto_size_ctl_t.
@@ -3271,30 +2801,22 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5AC_ext_config_2_int_config(H5AC_cache_config_t * ext_conf_ptr,
- H5C_auto_size_ctl_t * int_conf_ptr)
+H5AC__ext_config_2_int_config(H5AC_cache_config_t *ext_conf_ptr,
+ H5C_auto_size_ctl_t *int_conf_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_STATIC
- if ( ( ext_conf_ptr == NULL ) ||
- ( ext_conf_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION ) ||
- ( int_conf_ptr == NULL ) ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Bad ext_conf_ptr or inf_conf_ptr on entry.")
- }
+ if((ext_conf_ptr == NULL) || (ext_conf_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION) ||
+ (int_conf_ptr == NULL))
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad ext_conf_ptr or int_conf_ptr on entry.")
int_conf_ptr->version = H5C__CURR_AUTO_SIZE_CTL_VER;
-
- if ( ext_conf_ptr->rpt_fcn_enabled ) {
-
+ if(ext_conf_ptr->rpt_fcn_enabled)
int_conf_ptr->rpt_fcn = H5C_def_auto_resize_rpt_fcn;
-
- } else {
-
+ else
int_conf_ptr->rpt_fcn = NULL;
- }
int_conf_ptr->set_initial_size = ext_conf_ptr->set_initial_size;
int_conf_ptr->initial_size = ext_conf_ptr->initial_size;
@@ -3323,18 +2845,17 @@ H5AC_ext_config_2_int_config(H5AC_cache_config_t * ext_conf_ptr,
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_ext_config_2_int_config() */
+} /* H5AC__ext_config_2_int_config() */
/*-------------------------------------------------------------------------
*
- * Function: H5AC_log_deleted_entry()
+ * Function: H5AC__log_deleted_entry()
*
- * Purpose: Log an entry for which H5C__DELETED_FLAG has been set.
+ * Purpose: Log an entry which has been deleted.
*
- * If mpi_rank is 0, we must make sure that the entry doesn't
- * appear in the cleaned or dirty entry lists. Otherwise,
- * we have nothing to do.
+ * Only called for mpi_rank 0. We must make sure that the entry
+ * doesn't appear in the cleaned or dirty entry lists.
*
* Return SUCCEED on success, and FAIL on failure.
*
@@ -3346,80 +2867,48 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_log_deleted_entry(H5AC_t * cache_ptr,
- H5AC_info_t * entry_ptr,
- haddr_t addr,
- unsigned int flags)
+H5AC__log_deleted_entry(const H5AC_info_t *entry_ptr)
{
+ H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
H5AC_slist_entry_t * slist_entry_ptr = NULL;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
+ haddr_t addr;
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ FUNC_ENTER_STATIC_NOERR
+ /* Sanity checks */
+ HDassert(entry_ptr);
+ addr = entry_ptr->addr;
+ cache_ptr = entry_ptr->cache_ptr;
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->mpi_rank == 0);
+ HDassert(aux_ptr->d_slist_ptr != NULL);
+ HDassert(aux_ptr->c_slist_ptr != NULL);
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
-
- HDassert( entry_ptr != NULL );
- HDassert( entry_ptr->addr == addr );
-
- HDassert( (flags & H5C__DELETED_FLAG) != 0 );
-
- if(aux_ptr->mpi_rank == 0) {
- HDassert( aux_ptr->d_slist_ptr != NULL );
- HDassert( aux_ptr->c_slist_ptr != NULL );
-
- /* if the entry appears in the dirtied entry slist, remove it. */
- if((slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_search(aux_ptr->d_slist_ptr, (void *)(&addr))) != NULL) {
- HDassert(slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
- HDassert(slist_entry_ptr->addr == addr);
-
- if(H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr)) != slist_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, "Can't delete entry from dirty entry slist.")
-
- slist_entry_ptr->magic = 0;
- slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
-
- aux_ptr->d_slist_len -= 1;
-
- HDassert( aux_ptr->d_slist_len >= 0 );
- } /* end if */
-
- /* if the entry appears in the cleaned entry slist, remove it. */
- if((slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_search(aux_ptr->c_slist_ptr, (void *)(&addr))) != NULL) {
- HDassert(slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
- HDassert(slist_entry_ptr->addr == addr);
-
- if(H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr)) != slist_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, "Can't delete entry from cleaned entry slist.")
-
- slist_entry_ptr->magic = 0;
- slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
-
- aux_ptr->c_slist_len -= 1;
+ /* if the entry appears in the dirtied entry slist, remove it. */
+ if(NULL != (slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr))))
+ slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
- HDassert( aux_ptr->c_slist_len >= 0 );
- } /* end if */
- } /* if */
+ /* if the entry appears in the cleaned entry slist, remove it. */
+ if(NULL != (slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))))
+ slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_log_deleted_entry() */
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5AC__log_deleted_entry() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
*
- * Function: H5AC_log_dirtied_entry()
+ * Function: H5AC__log_dirtied_entry()
*
* Purpose: Update the dirty_bytes count for a newly dirtied entry.
*
- * If mpi_rank isnt 0, this simply means adding the size
+ * If mpi_rank isn't 0, this simply means adding the size
* of the entries to the dirty_bytes count.
*
* If mpi_rank is 0, we must first check to see if the entry
@@ -3438,103 +2927,73 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_log_dirtied_entry(const H5AC_info_t * entry_ptr,
- haddr_t addr)
+H5AC__log_dirtied_entry(const H5AC_info_t *entry_ptr)
{
H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( entry_ptr );
- HDassert( entry_ptr->addr == addr );
- HDassert( entry_ptr->is_dirty == FALSE );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->is_dirty == FALSE);
cache_ptr = entry_ptr->cache_ptr;
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
-
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
-
- if ( aux_ptr->mpi_rank == 0 ) {
- H5AC_slist_entry_t * slist_entry_ptr;
-
- HDassert( aux_ptr->d_slist_ptr != NULL );
- HDassert( aux_ptr->c_slist_ptr != NULL );
+ if(aux_ptr->mpi_rank == 0) {
+ H5AC_slist_entry_t *slist_entry_ptr;
+ haddr_t addr = entry_ptr->addr;
- if ( H5SL_search(aux_ptr->d_slist_ptr, (void *)(&addr)) == NULL ) {
+ /* Sanity checks */
+ HDassert(aux_ptr->d_slist_ptr != NULL);
+ HDassert(aux_ptr->c_slist_ptr != NULL);
+ if(NULL == H5SL_search(aux_ptr->d_slist_ptr, (void *)(&addr))) {
/* insert the address of the entry in the dirty entry list, and
* add its size to the dirty_bytes count.
*/
- if ( NULL == (slist_entry_ptr = H5FL_CALLOC(H5AC_slist_entry_t)) ) {
-
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
- "Can't allocate dirty slist entry .")
- }
-
- slist_entry_ptr->magic = H5AC__H5AC_SLIST_ENTRY_T_MAGIC;
+ if(NULL == (slist_entry_ptr = H5FL_MALLOC(H5AC_slist_entry_t)))
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "Can't allocate dirty slist entry.")
slist_entry_ptr->addr = addr;
- if ( H5SL_insert(aux_ptr->d_slist_ptr, slist_entry_ptr,
- &(slist_entry_ptr->addr)) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, \
- "can't insert entry into dirty entry slist.")
- }
+ if(H5SL_insert(aux_ptr->d_slist_ptr, slist_entry_ptr, &(slist_entry_ptr->addr)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "can't insert entry into dirty entry slist.")
- aux_ptr->d_slist_len += 1;
aux_ptr->dirty_bytes += entry_ptr->size;
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
aux_ptr->unprotect_dirty_bytes += entry_ptr->size;
aux_ptr->unprotect_dirty_bytes_updates += 1;
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
- }
-
- if(H5SL_search(aux_ptr->c_slist_ptr, (void *)(&addr)) != NULL) {
- /* the entry is dirty. If it exists on the cleaned entries list,
- * remove it.
- */
- if((slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_search(aux_ptr->c_slist_ptr, (void *)(&addr))) != NULL) {
- HDassert(slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
- HDassert(slist_entry_ptr->addr == addr);
-
- if(H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr)) != slist_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, "Can't delete entry from clean entry slist.")
-
- slist_entry_ptr->magic = 0;
- slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
-
- aux_ptr->c_slist_len -= 1;
-
- HDassert( aux_ptr->c_slist_len >= 0 );
- } /* end if */
} /* end if */
- } else {
+ /* the entry is dirty. If it exists on the cleaned entries list,
+ * remove it.
+ */
+ if(NULL != (slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))))
+ slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
+ } /* end if */
+ else {
aux_ptr->dirty_bytes += entry_ptr->size;
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
         aux_ptr->unprotect_dirty_bytes += entry_ptr->size;
aux_ptr->unprotect_dirty_bytes_updates += 1;
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
- }
+ } /* end else */
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
-} /* H5AC_log_dirtied_entry() */
+} /* H5AC__log_dirtied_entry() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
*
- * Function: H5AC_log_flushed_entry()
+ * Function: H5AC__log_flushed_entry()
*
* Purpose: Update the clean entry slist for the flush of an entry --
* specifically, if the entry has been cleared, remove it
@@ -3555,117 +3014,58 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_log_flushed_entry(H5C_t * cache_ptr,
- haddr_t addr,
- hbool_t was_dirty,
- unsigned flags,
- int H5_ATTR_UNUSED type_id)
+H5AC__log_flushed_entry(H5C_t *cache_ptr, haddr_t addr, hbool_t was_dirty,
+ unsigned flags)
{
- herr_t ret_value = SUCCEED; /* Return value */
hbool_t cleared;
H5AC_aux_t * aux_ptr;
H5AC_slist_entry_t * slist_entry_ptr = NULL;
+ herr_t ret_value = SUCCEED; /* Return value */
+ FUNC_ENTER_STATIC
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
-
+ /* Sanity check */
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->mpi_rank == 0);
+ HDassert(aux_ptr->c_slist_ptr != NULL);
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->mpi_rank == 0 );
- HDassert( aux_ptr->c_slist_ptr != NULL );
-
- cleared = ( (flags & H5C__FLUSH_CLEAR_ONLY_FLAG) != 0 );
-
- if ( cleared ) {
+ /* Set local flags */
+ cleared = ((flags & H5C__FLUSH_CLEAR_ONLY_FLAG) != 0);
+ if(cleared) {
/* If the entry has been cleared, must remove it from both the
* cleaned list and the dirtied list.
*/
-
- if ( (slist_entry_ptr = (H5AC_slist_entry_t *)
- H5SL_search(aux_ptr->c_slist_ptr,
- (void *)(&addr))) != NULL ) {
-
- HDassert( slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
- HDassert( slist_entry_ptr->addr == addr );
-
- if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
- != slist_entry_ptr ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
- "Can't delete entry from clean entry slist.")
- }
-
- slist_entry_ptr->magic = 0;
+ if(NULL != (slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))))
slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
-
- aux_ptr->c_slist_len -= 1;
-
- HDassert( aux_ptr->c_slist_len >= 0 );
- }
-
- if ( (slist_entry_ptr = (H5AC_slist_entry_t *)
- H5SL_search(aux_ptr->d_slist_ptr, (void *)(&addr))) != NULL ) {
-
- HDassert( slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
- HDassert( slist_entry_ptr->addr == addr );
-
- if ( H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr))
- != slist_entry_ptr ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
- "Can't delete entry from dirty entry slist.")
- }
-
- slist_entry_ptr->magic = 0;
+ if(NULL != (slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr))))
slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
-
- aux_ptr->d_slist_len -= 1;
-
- HDassert( aux_ptr->d_slist_len >= 0 );
- }
- } else if ( was_dirty ) {
-
- if ( H5SL_search(aux_ptr->c_slist_ptr, (void *)(&addr)) == NULL ) {
-
+ } /* end if */
+ else if(was_dirty) {
+ if(NULL == H5SL_search(aux_ptr->c_slist_ptr, (void *)(&addr))) {
/* insert the address of the entry in the clean entry list. */
+ if(NULL == (slist_entry_ptr = H5FL_MALLOC(H5AC_slist_entry_t)))
+                HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "Can't allocate clean slist entry.")
+ slist_entry_ptr->addr = addr;
- if ( NULL == (slist_entry_ptr = H5FL_CALLOC(H5AC_slist_entry_t)) ) {
-
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
- "Can't allocate clean slist entry .")
- }
-
- slist_entry_ptr->magic = H5AC__H5AC_SLIST_ENTRY_T_MAGIC;
- slist_entry_ptr->addr = addr;
-
- if ( H5SL_insert(aux_ptr->c_slist_ptr, slist_entry_ptr,
- &(slist_entry_ptr->addr)) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, \
- "can't insert entry into clean entry slist.")
- }
-
- aux_ptr->c_slist_len += 1;
- }
- }
+ if(H5SL_insert(aux_ptr->c_slist_ptr, slist_entry_ptr, &(slist_entry_ptr->addr)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "can't insert entry into clean entry slist.")
+ } /* end if */
+ } /* end else-if */
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
-} /* H5AC_log_flushed_entry() */
+} /* H5AC__log_flushed_entry() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
*
- * Function: H5AC_log_inserted_entry()
+ * Function: H5AC__log_inserted_entry()
*
* Purpose: Update the dirty_bytes count for a newly inserted entry.
*
@@ -3685,48 +3085,44 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_log_inserted_entry(H5AC_t * cache_ptr,
- H5AC_info_t * entry_ptr)
+H5AC__log_inserted_entry(const H5AC_info_t *entry_ptr)
{
+ H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(entry_ptr);
+ cache_ptr = entry_ptr->cache_ptr;
HDassert(cache_ptr != NULL);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
HDassert(aux_ptr != NULL);
HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
- HDassert( entry_ptr != NULL );
-
if(aux_ptr->mpi_rank == 0) {
- H5AC_slist_entry_t * slist_entry_ptr;
+ H5AC_slist_entry_t *slist_entry_ptr;
HDassert(aux_ptr->d_slist_ptr != NULL);
HDassert(aux_ptr->c_slist_ptr != NULL);
- if(NULL != H5SL_search(aux_ptr->d_slist_ptr, (void *)(&entry_ptr->addr)))
+ /* Entry to insert should not be in dirty list currently */
+ if(NULL != H5SL_search(aux_ptr->d_slist_ptr, (const void *)(&entry_ptr->addr)))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Inserted entry already in dirty slist.")
/* insert the address of the entry in the dirty entry list, and
* add its size to the dirty_bytes count.
*/
- if(NULL == (slist_entry_ptr = H5FL_CALLOC(H5AC_slist_entry_t)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "Can't allocate dirty slist entry .")
-
- slist_entry_ptr->magic = H5AC__H5AC_SLIST_ENTRY_T_MAGIC;
+ if(NULL == (slist_entry_ptr = H5FL_MALLOC(H5AC_slist_entry_t)))
+            HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "Can't allocate dirty slist entry.")
slist_entry_ptr->addr = entry_ptr->addr;
-
- if(H5SL_insert(aux_ptr->d_slist_ptr, slist_entry_ptr, &(slist_entry_ptr->addr)) < 0 )
+ if(H5SL_insert(aux_ptr->d_slist_ptr, slist_entry_ptr, &(slist_entry_ptr->addr)) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "can't insert entry into dirty entry slist.")
- aux_ptr->d_slist_len += 1;
-
- if(NULL != H5SL_search(aux_ptr->c_slist_ptr, (void *)(&entry_ptr->addr)))
+ /* Entry to insert should not be in clean list either */
+ if(NULL != H5SL_search(aux_ptr->c_slist_ptr, (const void *)(&entry_ptr->addr)))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Inserted entry in clean slist.")
} /* end if */
@@ -3739,13 +3135,13 @@ H5AC_log_inserted_entry(H5AC_t * cache_ptr,
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_log_inserted_entry() */
+} /* H5AC__log_inserted_entry() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
*
- * Function: H5AC_log_moved_entry()
+ * Function: H5AC__log_moved_entry()
*
* Purpose: Update the dirty_bytes count for a moved entry.
*
@@ -3756,8 +3152,8 @@ done:
* moving in a collective operation and immediately after
* unprotecting the target entry.
*
- * This function uses this invarient, and will cause arcane
- * failures if it is not met. If maintaining this invarient
+ * This function uses this invariant, and will cause arcane
+ * failures if it is not met. If maintaining this invariant
* becomes impossible, we will have to rework this function
* extensively, and likely include a bit of IPC for
* synchronization. A better option might be to subsume
@@ -3791,100 +3187,52 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_log_moved_entry(const H5F_t *f,
- haddr_t old_addr,
- haddr_t new_addr)
+H5AC__log_moved_entry(const H5F_t *f, haddr_t old_addr, haddr_t new_addr)
{
- H5AC_t * cache_ptr;
+ H5AC_t * cache_ptr;
+ H5AC_aux_t * aux_ptr;
hbool_t entry_in_cache;
hbool_t entry_dirty;
size_t entry_size;
- H5AC_aux_t * aux_ptr = NULL;
- H5AC_slist_entry_t * slist_entry_ptr = NULL;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( f );
- HDassert( f->shared );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
cache_ptr = (H5AC_t *)f->shared->cache;
-
- HDassert( cache_ptr );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
-
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
/* get entry status, size, etc here */
- if ( H5C_get_entry_status(f, old_addr, &entry_size, &entry_in_cache,
- &entry_dirty, NULL, NULL, NULL, NULL) < 0 ) {
-
+ if(H5C_get_entry_status(f, old_addr, &entry_size, &entry_in_cache,
+ &entry_dirty, NULL, NULL, NULL, NULL) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get entry status.")
-
- } else if ( ! entry_in_cache ) {
-
- HDassert( entry_in_cache );
+ if(!entry_in_cache)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry not in cache.")
- }
- if ( aux_ptr->mpi_rank == 0 ) {
+ if(aux_ptr->mpi_rank == 0) {
+ H5AC_slist_entry_t * slist_entry_ptr;
- HDassert( aux_ptr->d_slist_ptr != NULL );
- HDassert( aux_ptr->c_slist_ptr != NULL );
+ HDassert(aux_ptr->d_slist_ptr != NULL);
+ HDassert(aux_ptr->c_slist_ptr != NULL);
/* if the entry appears in the cleaned entry slist, under its old
* address, remove it.
*/
- if ( (slist_entry_ptr = (H5AC_slist_entry_t *)
- H5SL_search(aux_ptr->c_slist_ptr, (void *)(&old_addr))) != NULL ) {
-
- HDassert( slist_entry_ptr->magic ==
- H5AC__H5AC_SLIST_ENTRY_T_MAGIC );
- HDassert( slist_entry_ptr->addr == old_addr );
-
- if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&old_addr))
- != slist_entry_ptr ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
- "Can't delete entry from cleaned entry slist.")
- }
-
- slist_entry_ptr->magic = 0;
+ if(NULL != (slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&old_addr))))
slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
- aux_ptr->c_slist_len -= 1;
-
- HDassert( aux_ptr->c_slist_len >= 0 );
- }
-
/* if the entry appears in the dirtied entry slist under its old
* address, remove it, but don't free it. Set addr to new_addr.
*/
- if ( (slist_entry_ptr = (H5AC_slist_entry_t *)
- H5SL_search(aux_ptr->d_slist_ptr, (void *)(&old_addr))) != NULL ) {
-
- HDassert( slist_entry_ptr->magic ==
- H5AC__H5AC_SLIST_ENTRY_T_MAGIC );
- HDassert( slist_entry_ptr->addr == old_addr );
-
- if ( H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&old_addr))
- != slist_entry_ptr ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
- "Can't delete entry from dirty entry slist.")
- }
-
+ if(NULL != (slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&old_addr))))
slist_entry_ptr->addr = new_addr;
-
- aux_ptr->d_slist_len -= 1;
-
- HDassert( aux_ptr->d_slist_len >= 0 );
-
- } else {
-
+ else {
/* otherwise, allocate a new entry that is ready
* for insertion, and increment dirty_bytes.
*
@@ -3892,17 +3240,10 @@ H5AC_log_moved_entry(const H5F_t *f,
* list under its old address implies that it must have
* been clean to start with.
*/
-
- HDassert( !entry_dirty );
-
- if ( NULL == (slist_entry_ptr = H5FL_CALLOC(H5AC_slist_entry_t)) ) {
-
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
- "Can't allocate dirty slist entry .")
- }
-
- slist_entry_ptr->magic = H5AC__H5AC_SLIST_ENTRY_T_MAGIC;
- slist_entry_ptr->addr = new_addr;
+ HDassert(!entry_dirty);
+ if(NULL == (slist_entry_ptr = H5FL_MALLOC(H5AC_slist_entry_t)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "Can't allocate dirty slist entry .")
+ slist_entry_ptr->addr = new_addr;
aux_ptr->dirty_bytes += entry_size;
@@ -3910,43 +3251,29 @@ H5AC_log_moved_entry(const H5F_t *f,
aux_ptr->move_dirty_bytes += entry_size;
aux_ptr->move_dirty_bytes_updates += 1;
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
- }
-
- /* verify that there is no entry at new_addr in the dirty slist */
- if ( H5SL_search(aux_ptr->d_slist_ptr, (void *)(&new_addr)) != NULL ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "dirty slist already contains entry at new_addr.")
- }
+ } /* end else */
/* insert / reinsert the entry in the dirty slist */
- if ( H5SL_insert(aux_ptr->d_slist_ptr, slist_entry_ptr,
- &(slist_entry_ptr->addr)) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, \
- "can't insert entry into dirty entry slist.")
- }
-
- aux_ptr->d_slist_len += 1;
-
- } else if ( ! entry_dirty ) {
-
+ if(H5SL_insert(aux_ptr->d_slist_ptr, slist_entry_ptr, &(slist_entry_ptr->addr)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "can't insert entry into dirty entry slist.")
+ } /* end if */
+ else if(!entry_dirty) {
aux_ptr->dirty_bytes += entry_size;
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
aux_ptr->move_dirty_bytes += entry_size;
aux_ptr->move_dirty_bytes_updates += 1;
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
- }
+ } /* end else-if */
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_log_moved_entry() */
+} /* H5AC__log_moved_entry() */
#endif /* H5_HAVE_PARALLEL */
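
As context for the move-logging hunk above: on process 0 the rule is that a moved entry which is already on the dirty list is simply re-keyed to its new address, while an entry that was clean gets a fresh dirty-list record and has its size charged to the running dirty-byte count. Below is a minimal, self-contained sketch of that accounting rule only; the types and helpers are invented for illustration and are not HDF5 APIs.

    #include <assert.h>
    #include <stdlib.h>

    /* Invented stand-ins for the cache's per-sync-point bookkeeping. */
    typedef unsigned long long my_addr_t;
    typedef struct dirty_rec { my_addr_t addr; struct dirty_rec *next; } dirty_rec_t;

    typedef struct {
        dirty_rec_t *head;    /* records keyed by entry address */
        size_t       bytes;   /* bytes charged since the last sync point */
    } addr_log_t;

    /* Unlink and return the record for 'addr', or NULL if it is not listed. */
    static dirty_rec_t *log_remove(addr_log_t *log, my_addr_t addr)
    {
        dirty_rec_t **pp = &log->head;

        while (*pp) {
            if ((*pp)->addr == addr) {
                dirty_rec_t *hit = *pp;

                *pp = hit->next;
                return hit;
            }
            pp = &(*pp)->next;
        }
        return NULL;
    }

    /* Record a move from old_addr to new_addr of an entry of entry_size bytes. */
    static int log_moved_entry(addr_log_t *dirty_log, my_addr_t old_addr,
                               my_addr_t new_addr, size_t entry_size,
                               int entry_was_dirty)
    {
        dirty_rec_t *rec = log_remove(dirty_log, old_addr);

        if (rec == NULL) {
            /* Not on the dirty list, so the entry must have been clean:
             * allocate a new record and charge its size to the dirty bytes.
             */
            assert(!entry_was_dirty);
            if (NULL == (rec = malloc(sizeof(*rec))))
                return -1;
            dirty_log->bytes += entry_size;
        }

        /* Either way, the record is (re)filed under the new address. */
        rec->addr = new_addr;
        rec->next = dirty_log->head;
        dirty_log->head = rec;
        return 0;
    }

The patched function performs the same remove-or-allocate step on a skip list (H5SL_remove(), H5FL_MALLOC()) and then reinserts the record keyed on the new address.
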
/*-------------------------------------------------------------------------
- * Function: H5AC_propagate_and_apply_candidate_list
+ * Function: H5AC__propagate_and_apply_candidate_list
*
* Purpose: Prior to the addition of support for multiple metadata
* write strategies, in PHDF5, only the metadata cache with
@@ -4042,42 +3369,41 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_propagate_and_apply_candidate_list(H5F_t * f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr)
+H5AC__propagate_and_apply_candidate_list(H5F_t *f, hid_t dxpl_id)
{
- int mpi_code;
- int num_candidates = 0;
- haddr_t * candidates_list_ptr = NULL;
+ H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
+ haddr_t * candidates_list_ptr = NULL;
+ int mpi_result;
+ int num_candidates = 0;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(f != NULL);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
/* to prevent "messages from the future" we must synchronize all
* processes before we write any entries.
*/
- if(MPI_SUCCESS != (mpi_code = MPI_Barrier(aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed 1", mpi_code)
+ if(MPI_SUCCESS != (mpi_result = MPI_Barrier(aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_result)
if(aux_ptr->mpi_rank == 0) {
- if(H5AC_broadcast_candidate_list(cache_ptr, &num_candidates, &candidates_list_ptr) < 0)
+ if(H5AC__broadcast_candidate_list(cache_ptr, &num_candidates, &candidates_list_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't broadcast candidate slist.")
- HDassert( aux_ptr->candidate_slist_len == 0 );
+ HDassert(H5SL_count(aux_ptr->candidate_slist_ptr) == 0);
} /* end if */
else {
- if(H5AC_receive_candidate_list(cache_ptr, &num_candidates, &candidates_list_ptr) < 0)
+ if(H5AC__receive_candidate_list(cache_ptr, &num_candidates, &candidates_list_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't receive candidate broadcast.")
} /* end else */
@@ -4089,55 +3415,59 @@ H5AC_propagate_and_apply_candidate_list(H5F_t * f,
* distributing the writes across the processes.
*/
+ /* Enable writes during this operation */
aux_ptr->write_permitted = TRUE;
- result = H5C_apply_candidate_list(f,
- dxpl_id,
- dxpl_id,
- cache_ptr,
- num_candidates,
- candidates_list_ptr,
- aux_ptr->mpi_rank,
- aux_ptr->mpi_size);
+ /* Apply the candidate list */
+ result = H5C_apply_candidate_list(f, dxpl_id, dxpl_id, cache_ptr, num_candidates,
+ candidates_list_ptr, aux_ptr->mpi_rank, aux_ptr->mpi_size);
+ /* Disable writes again */
aux_ptr->write_permitted = FALSE;
+ /* Check for error on the write operation */
if(result < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't apply candidate list.")
- if(aux_ptr->write_done != NULL)
+ /* this code exists primarily for the test bed -- it allows us to
+ * enforce posix semantics on the server that pretends to be a
+ * file system in our parallel tests.
+ */
+ if(aux_ptr->write_done)
(aux_ptr->write_done)();
/* to prevent "messages from the past" we must synchronize all
* processes again before we go on.
*/
- if(MPI_SUCCESS != (mpi_code = MPI_Barrier(aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed 2", mpi_code)
+ if(MPI_SUCCESS != (mpi_result = MPI_Barrier(aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_result)
- if(aux_ptr->mpi_rank == 0) {
- if(H5AC_tidy_cache_0_lists(cache_ptr, num_candidates, candidates_list_ptr) < 0)
+ /* if this is process zero, tidy up the dirtied,
+ * and flushed and still clean lists.
+ */
+ if(aux_ptr->mpi_rank == 0)
+ if(H5AC__tidy_cache_0_lists(cache_ptr, num_candidates, candidates_list_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't tidy up process 0 lists.")
- } /* end if */
} /* end if */
/* if it is defined, call the sync point done callback. Note
* that this callback is defined purely for testing purposes,
* and should be undefined under normal operating circumstances.
*/
- if(aux_ptr->sync_point_done != NULL)
+ if(aux_ptr->sync_point_done)
(aux_ptr->sync_point_done)(num_candidates, candidates_list_ptr);
done:
- if(candidates_list_ptr != NULL)
+ if(candidates_list_ptr)
candidates_list_ptr = (haddr_t *)H5MM_xfree((void *)candidates_list_ptr);
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_propagate_and_apply_candidate_list() */
+} /* H5AC__propagate_and_apply_candidate_list() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
- * Function: H5AC_propagate_flushed_and_still_clean_entries_list
+ * Function: H5AC__propagate_flushed_and_still_clean_entries_list
*
* Purpose: In PHDF5, if the process 0 only metadata write strategy
* is selected, only the metadata cache with mpi rank 0 is
@@ -4209,47 +3539,48 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_propagate_flushed_and_still_clean_entries_list(H5F_t * f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr)
+H5AC__propagate_flushed_and_still_clean_entries_list(H5F_t *f, hid_t dxpl_id)
{
+ H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(f != NULL);
+ cache_ptr = f->shared->cache;
HDassert(cache_ptr != NULL);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
HDassert(aux_ptr != NULL);
HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
- HDassert(aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
+ HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
if(aux_ptr->mpi_rank == 0) {
- if(H5AC_broadcast_clean_list(cache_ptr) < 0)
+ if(H5AC__broadcast_clean_list(cache_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't broadcast clean slist.")
- HDassert( aux_ptr->c_slist_len == 0 );
+ HDassert(H5SL_count(aux_ptr->c_slist_ptr) == 0);
} /* end if */
else {
- if(H5AC_receive_and_apply_clean_list(f, dxpl_id, H5AC_dxpl_id, cache_ptr) < 0)
+ if(H5AC__receive_and_apply_clean_list(f, dxpl_id, H5AC_dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't receive and/or process clean slist broadcast.")
} /* end else */
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_propagate_flushed_and_still_clean_entries_list() */
+} /* H5AC__propagate_flushed_and_still_clean_entries_list() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
*
- * Function: H5AC_receive_and_apply_clean_list()
+ * Function:    H5AC__receive_haddr_list()
*
- * Purpose: Receive the list of cleaned entries from process 0,
- * and mark the specified entries as clean.
+ * Purpose: Receive the list of entry addresses from process 0,
+ * and return it in a buffer pointed to by *haddr_buf_ptr_ptr.
+ * Note that the caller must free this buffer if it is
+ * returned.
*
* This function must only be called by the process with
* MPI_rank greater than 0.
@@ -4258,104 +3589,132 @@ done:
*
* Return: Non-negative on success/Negative on failure.
*
- * Programmer: John Mainzer, 7/4/05
+ * Programmer: Quincey Koziol, 6/11/2015
*
*-------------------------------------------------------------------------
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_receive_and_apply_clean_list(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- H5AC_t * cache_ptr)
+H5AC__receive_haddr_list(MPI_Comm mpi_comm, int *num_entries_ptr,
+ haddr_t **haddr_buf_ptr_ptr)
{
- H5AC_aux_t * aux_ptr;
haddr_t * haddr_buf_ptr = NULL;
- MPI_Offset * MPI_Offset_buf_ptr = NULL;
int mpi_result;
- int num_entries = 0;
+ int num_entries;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_STATIC
- HDassert( f != NULL );
- HDassert( f->shared->cache == cache_ptr );
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
-
- aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->mpi_rank != 0 );
+ /* Sanity checks */
+ HDassert(num_entries_ptr != NULL);
+ HDassert(*num_entries_ptr == 0);
+ HDassert(haddr_buf_ptr_ptr != NULL);
+ HDassert(*haddr_buf_ptr_ptr == NULL);
/* First receive the number of entries in the list so that we
* can set up a buffer to receive them. If there aren't
* any, we are done.
*/
- if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 1", mpi_result)
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
if(num_entries > 0) {
size_t buf_size;
- int i;
/* allocate buffers to store the list of entry base addresses in */
- buf_size = sizeof(MPI_Offset) * (size_t)num_entries;
- if(NULL == (MPI_Offset_buf_ptr = (MPI_Offset *)H5MM_malloc(buf_size)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for receive buffer")
- if(NULL == (haddr_buf_ptr = (haddr_t *)H5MM_malloc(sizeof(haddr_t) * (size_t)num_entries)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for haddr buffer")
-
- /* Now receive the list of cleaned entries
- *
- * The peculiar structure of the following call to MPI_Bcast is
- * due to MPI's (?) failure to believe in the MPI_Offset type.
- * Thus the element type is MPI_BYTE, with size equal to the
- * buf_size computed above.
- */
- if(MPI_SUCCESS != (mpi_result = MPI_Bcast((void *)MPI_Offset_buf_ptr, (int)buf_size, MPI_BYTE, 0, aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 2", mpi_result)
+ buf_size = sizeof(haddr_t) * (size_t)num_entries;
+ if(NULL == (haddr_buf_ptr = (haddr_t *)H5MM_malloc(buf_size)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for haddr buffer")
- /* translate the MPI_Offsets to haddr_t */
- i = 0;
- while(i < num_entries) {
- haddr_buf_ptr[i] = H5FD_mpi_MPIOff_to_haddr(MPI_Offset_buf_ptr[i]);
+ /* Now receive the list of candidate entries */
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast((void *)haddr_buf_ptr, (int)buf_size, MPI_BYTE, 0, mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
+ } /* end if */
- if(haddr_buf_ptr[i] == HADDR_UNDEF)
- HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't convert MPI off to haddr")
+ /* finally, pass the number of entries and the buffer pointer
+ * back to the caller.
+ */
+ *num_entries_ptr = num_entries;
+ *haddr_buf_ptr_ptr = haddr_buf_ptr;
- i++;
- } /* end while */
+done:
+ if(ret_value < 0)
+ if(haddr_buf_ptr)
+ haddr_buf_ptr = (haddr_t *)H5MM_xfree((void *)haddr_buf_ptr);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC__receive_haddr_list() */
+#endif /* H5_HAVE_PARALLEL */
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC__receive_and_apply_clean_list()
+ *
+ * Purpose: Receive the list of cleaned entries from process 0,
+ * and mark the specified entries as clean.
+ *
+ * This function must only be called by the process with
+ * MPI_rank greater than 0.
+ *
+ * Return SUCCEED on success, and FAIL on failure.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 7/4/05
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5_HAVE_PARALLEL
+static herr_t
+H5AC__receive_and_apply_clean_list(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id)
+{
+ H5AC_t * cache_ptr;
+ H5AC_aux_t * aux_ptr;
+ haddr_t * haddr_buf_ptr = NULL;
+ int num_entries = 0;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity check */
+ HDassert(f != NULL);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->mpi_rank != 0);
+
+ /* Retrieve the clean list from process 0 */
+ if(H5AC__receive_haddr_list(aux_ptr->mpi_comm, &num_entries, &haddr_buf_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't receive clean list")
+
+ if(num_entries > 0)
/* mark the indicated entries as clean */
- if(H5C_mark_entries_as_clean(f, primary_dxpl_id, secondary_dxpl_id,
- (int32_t)num_entries, &(haddr_buf_ptr[0])) < 0)
+ if(H5C_mark_entries_as_clean(f, primary_dxpl_id, secondary_dxpl_id, (int32_t)num_entries, haddr_buf_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't mark entries clean.")
- } /* end if */
/* if it is defined, call the sync point done callback. Note
* that this callback is defined purely for testing purposes,
* and should be undefined under normal operating circumstances.
*/
- if(aux_ptr->sync_point_done != NULL)
+ if(aux_ptr->sync_point_done)
(aux_ptr->sync_point_done)(num_entries, haddr_buf_ptr);
done:
- if(MPI_Offset_buf_ptr != NULL)
- MPI_Offset_buf_ptr = (MPI_Offset *)H5MM_xfree((void *)MPI_Offset_buf_ptr);
- if(haddr_buf_ptr != NULL)
+ if(haddr_buf_ptr)
haddr_buf_ptr = (haddr_t *)H5MM_xfree((void *)haddr_buf_ptr);
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_receive_and_apply_clean_list() */
+} /* H5AC__receive_and_apply_clean_list() */
#endif /* H5_HAVE_PARALLEL */
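
The new H5AC__receive_haddr_list() / H5AC__receive_and_apply_clean_list() pair reduces to a two-step MPI_Bcast protocol: rank 0 first broadcasts the entry count, then, if it is non-zero, broadcasts the packed address array as raw bytes; non-root ranks then act on the buffer they received. A compilable sketch of the same protocol in plain MPI follows; my_addr_t, bcast_addr_list() and the mark_clean callback are illustrative stand-ins, not HDF5 types or functions.

    #include <mpi.h>
    #include <stdlib.h>
    #include <string.h>

    typedef unsigned long long my_addr_t;      /* stand-in for haddr_t */

    /* Rank 0 passes its address list; all other ranks pass list == NULL, n == 0.
     * On success every rank owns a malloc'ed copy in *out with the count in
     * *n_out (both NULL/0 when the list is empty).  Returns 0 or -1.
     */
    static int bcast_addr_list(MPI_Comm comm, const my_addr_t *list, int n,
                               my_addr_t **out, int *n_out)
    {
        my_addr_t *buf = NULL;
        int rank, count;

        MPI_Comm_rank(comm, &rank);
        count = (rank == 0) ? n : 0;

        /* Step 1: every rank learns how many addresses are coming. */
        if (MPI_SUCCESS != MPI_Bcast(&count, 1, MPI_INT, 0, comm))
            return -1;

        if (count > 0) {
            size_t buf_size = sizeof(my_addr_t) * (size_t)count;

            if (NULL == (buf = malloc(buf_size)))
                return -1;
            if (rank == 0)
                memcpy(buf, list, buf_size);

            /* Step 2: ship the packed addresses as raw bytes, as the patched
             * code now does instead of translating through MPI_Offset.
             */
            if (MPI_SUCCESS != MPI_Bcast(buf, (int)buf_size, MPI_BYTE, 0, comm)) {
                free(buf);
                return -1;
            }
        }

        *out = buf;
        *n_out = count;
        return 0;
    }

    /* Non-root usage: receive the clean list and mark each address clean.
     * mark_clean() is a hypothetical hook standing in for
     * H5C_mark_entries_as_clean().
     */
    static int apply_clean_list(MPI_Comm comm,
                                int (*mark_clean)(int n, const my_addr_t *addrs))
    {
        my_addr_t *addrs = NULL;
        int n = 0, ret = 0;

        if (bcast_addr_list(comm, NULL, 0, &addrs, &n) < 0)
            return -1;
        if (n > 0 && mark_clean(n, addrs) < 0)
            ret = -1;

        free(addrs);                           /* the receiver owns the buffer */
        return ret;
    }
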
/*-------------------------------------------------------------------------
*
- * Function: H5AC_receive_candidate_list()
+ * Function: H5AC__receive_candidate_list()
*
* Purpose: Receive the list of candidate entries from process 0,
* and return it in a buffer pointed to by *haddr_buf_ptr_ptr.
@@ -4375,105 +3734,39 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_receive_candidate_list(H5AC_t * cache_ptr,
- int * num_entries_ptr,
- haddr_t ** haddr_buf_ptr_ptr)
+H5AC__receive_candidate_list(const H5AC_t *cache_ptr, int *num_entries_ptr,
+ haddr_t **haddr_buf_ptr_ptr)
{
- hbool_t success = FALSE;
H5AC_aux_t * aux_ptr;
- haddr_t * haddr_buf_ptr = NULL;
- MPI_Offset * MPI_Offset_buf_ptr = NULL;
- int mpi_result;
- int num_entries;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->mpi_rank != 0);
+ HDassert(aux_ptr-> metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
+ HDassert(num_entries_ptr != NULL);
+ HDassert(*num_entries_ptr == 0);
+ HDassert(haddr_buf_ptr_ptr != NULL);
+ HDassert(*haddr_buf_ptr_ptr == NULL);
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->mpi_rank != 0 );
- HDassert( aux_ptr-> metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
-
- HDassert( num_entries_ptr != NULL );
- HDassert( *num_entries_ptr == 0 );
-
- HDassert( haddr_buf_ptr_ptr != NULL );
- HDassert( *haddr_buf_ptr_ptr == NULL );
-
-
- /* First receive the number of entries in the list so that we
- * can set up a buffer to receive them. If there aren't
- * any, we are done.
- */
- if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 1", mpi_result)
-
- if(num_entries > 0) {
- size_t buf_size;
- int i;
-
- /* allocate buffers to store the list of entry base addresses in */
- buf_size = sizeof(MPI_Offset) * (size_t)num_entries;
-
- if(NULL == (MPI_Offset_buf_ptr = (MPI_Offset *)H5MM_malloc(buf_size)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for receive buffer")
- if(NULL == (haddr_buf_ptr = (haddr_t *)H5MM_malloc(sizeof(haddr_t) * (size_t)num_entries)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for haddr buffer")
-
- /* Now receive the list of candidate entries
- *
- * The peculiar structure of the following call to MPI_Bcast is
- * due to MPI's (?) failure to believe in the MPI_Offset type.
- * Thus the element type is MPI_BYTE, with size equal to the
- * buf_size computed above.
- */
- if(MPI_SUCCESS != (mpi_result = MPI_Bcast((void *)MPI_Offset_buf_ptr, (int)buf_size, MPI_BYTE, 0, aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 2", mpi_result)
-
- /* translate the MPI_Offsets to haddr_t */
- i = 0;
- while(i < num_entries) {
- haddr_buf_ptr[i] = H5FD_mpi_MPIOff_to_haddr(MPI_Offset_buf_ptr[i]);
-
- if(haddr_buf_ptr[i] == HADDR_UNDEF)
- HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't convert MPI off to haddr")
-
- i++;
- } /* end while */
- } /* end if */
-
- success = TRUE;
+ /* Retrieve the candidate list from process 0 */
+ if(H5AC__receive_haddr_list(aux_ptr->mpi_comm, num_entries_ptr, haddr_buf_ptr_ptr) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't receive candidate list")
done:
- if(MPI_Offset_buf_ptr != NULL)
- MPI_Offset_buf_ptr = (MPI_Offset *)H5MM_xfree((void *)MPI_Offset_buf_ptr);
-
- if(success) {
- /* finally, pass the number of entries and the buffer pointer
- * back to the caller. Do this so that we can use the same code
- * to apply the candidate list to all the processes.
- */
- *num_entries_ptr = num_entries;
- *haddr_buf_ptr_ptr = haddr_buf_ptr;
- } /* end if */
- else {
- if(haddr_buf_ptr != NULL)
- haddr_buf_ptr = (haddr_t *)H5MM_xfree((void *)haddr_buf_ptr);
- } /* end else */
-
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_receive_candidate_list() */
+} /* H5AC__receive_candidate_list() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
- * Function: H5AC_rsp__dist_md_write__flush
+ * Function: H5AC__rsp__dist_md_write__flush
*
* Purpose: Routine for handling the details of running a sync point
* that is triggered by a flush -- which in turn must have been
@@ -4522,31 +3815,27 @@ done:
*-------------------------------------------------------------------------
*/
#ifdef H5_HAVE_PARALLEL
-herr_t
-H5AC_rsp__dist_md_write__flush(H5F_t *f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr)
+static herr_t
+H5AC__rsp__dist_md_write__flush(H5F_t *f, hid_t dxpl_id)
{
- int mpi_code;
- int num_entries = 0;
- haddr_t * haddr_buf_ptr = NULL;
+ H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
+ haddr_t * haddr_buf_ptr = NULL;
+ int mpi_result;
+ int num_entries = 0;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( f != NULL );
- HDassert( f->shared->cache == cache_ptr );
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(f != NULL);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
/* first construct the candidate list -- initially, this will be in the
* form of a skip list. We will convert it later.
@@ -4554,33 +3843,30 @@ H5AC_rsp__dist_md_write__flush(H5F_t *f,
if(H5C_construct_candidate_list__clean_cache(cache_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't construct candidate list.")
- if(aux_ptr->candidate_slist_len > 0) {
+ if(H5SL_count(aux_ptr->candidate_slist_ptr) > 0) {
herr_t result;
/* convert the candidate list into the format we
* are used to receiving from process 0.
*/
- if(H5AC_copy_candidate_list_to_buffer(cache_ptr, &num_entries, &haddr_buf_ptr, NULL, NULL) < 0)
+ if(H5AC__copy_candidate_list_to_buffer(cache_ptr, &num_entries, &haddr_buf_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't construct candidate buffer.")
/* initial sync point barrier */
- if(MPI_SUCCESS != (mpi_code = MPI_Barrier(aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed 1", mpi_code)
+ if(MPI_SUCCESS != (mpi_result = MPI_Barrier(aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_result)
- /* apply the candidate list */
+ /* Enable writes during this operation */
aux_ptr->write_permitted = TRUE;
- result = H5C_apply_candidate_list(f,
- dxpl_id,
- dxpl_id,
- cache_ptr,
- num_entries,
- haddr_buf_ptr,
- aux_ptr->mpi_rank,
- aux_ptr->mpi_size);
+ /* Apply the candidate list */
+ result = H5C_apply_candidate_list(f, dxpl_id, dxpl_id, cache_ptr, num_entries,
+ haddr_buf_ptr, aux_ptr->mpi_rank, aux_ptr->mpi_size);
+ /* Disable writes again */
aux_ptr->write_permitted = FALSE;
+ /* Check for error on the write operation */
if(result < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't apply candidate list.")
@@ -4588,40 +3874,39 @@ H5AC_rsp__dist_md_write__flush(H5F_t *f,
* enforce posix semantics on the server that pretends to be a
* file system in our parallel tests.
*/
- if(aux_ptr->write_done != NULL)
+ if(aux_ptr->write_done)
(aux_ptr->write_done)();
/* final sync point barrier */
- if(MPI_SUCCESS != (mpi_code = MPI_Barrier(aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed 1", mpi_code)
+ if(MPI_SUCCESS != (mpi_result = MPI_Barrier(aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_result)
/* if this is process zero, tidy up the dirtied,
* and flushed and still clean lists.
*/
- if(aux_ptr->mpi_rank == 0) {
- if(H5AC_tidy_cache_0_lists(cache_ptr, num_entries, haddr_buf_ptr) < 0)
+ if(aux_ptr->mpi_rank == 0)
+ if(H5AC__tidy_cache_0_lists(cache_ptr, num_entries, haddr_buf_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't tidy up process 0 lists.")
- } /* end if */
} /* end if */
/* if it is defined, call the sync point done callback. Note
* that this callback is defined purely for testing purposes,
* and should be undefined under normal operating circumstances.
*/
- if(aux_ptr->sync_point_done != NULL)
+ if(aux_ptr->sync_point_done)
(aux_ptr->sync_point_done)(num_entries, haddr_buf_ptr);
done:
- if(haddr_buf_ptr != NULL)
+ if(haddr_buf_ptr)
haddr_buf_ptr = (haddr_t *)H5MM_xfree((void *)haddr_buf_ptr);
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_rsp__dist_md_write__flush() */
+} /* H5AC__rsp__dist_md_write__flush() */
#endif /* H5_HAVE_PARALLEL */
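
Stripped of the HDF5 bookkeeping, the distributed-write sync point above has a fixed shape: barrier, apply the candidate list with writes temporarily permitted, barrier, then let rank 0 tidy its dirtied and cleaned lists. A schematic version of that control flow is shown below; apply_candidates() and tidy_rank0_lists() are empty stand-ins for the real cache operations, not HDF5 functions.

    #include <mpi.h>

    /* Empty stand-ins for H5C_apply_candidate_list() and the rank 0 tidy-up;
     * in the real code these do the actual metadata writes and bookkeeping.
     */
    static int apply_candidates(int n, const unsigned long long *a, int rank, int size)
    { (void)n; (void)a; (void)rank; (void)size; return 0; }
    static int tidy_rank0_lists(int n, const unsigned long long *a)
    { (void)n; (void)a; return 0; }

    static int dist_md_write_sync_point(MPI_Comm comm, int *write_permitted,
                                        int n_cand, const unsigned long long *cand)
    {
        int rank, size;

        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);

        /* Barrier #1: nobody writes before every rank holds the candidate list. */
        if (MPI_SUCCESS != MPI_Barrier(comm))
            return -1;

        /* Writes are only legal inside the sync point. */
        *write_permitted = 1;
        if (apply_candidates(n_cand, cand, rank, size) < 0) {
            *write_permitted = 0;
            return -1;
        }
        *write_permitted = 0;

        /* Barrier #2: nobody proceeds until all writes have completed. */
        if (MPI_SUCCESS != MPI_Barrier(comm))
            return -1;

        /* Only rank 0 keeps the dirtied and cleaned bookkeeping lists. */
        if (rank == 0 && tidy_rank0_lists(n_cand, cand) < 0)
            return -1;

        return 0;
    }
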
/*-------------------------------------------------------------------------
- * Function: H5AC_rsp__dist_md_write__flush_to_min_clean
+ * Function: H5AC__rsp__dist_md_write__flush_to_min_clean
*
* Purpose: Routine for handling the details of running a sync point
* triggered by the accumulation of dirty metadata (as
@@ -4675,29 +3960,25 @@ done:
*-------------------------------------------------------------------------
*/
#ifdef H5_HAVE_PARALLEL
-herr_t
-H5AC_rsp__dist_md_write__flush_to_min_clean(H5F_t *f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr)
+static herr_t
+H5AC__rsp__dist_md_write__flush_to_min_clean(H5F_t *f, hid_t dxpl_id)
{
- hbool_t evictions_enabled;
+ H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
+ hbool_t evictions_enabled;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( f != NULL );
- HDassert( f->shared->cache == cache_ptr );
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(f != NULL);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
/* Query if evictions are allowed */
if(H5C_get_evictions_enabled((const H5C_t *)cache_ptr, &evictions_enabled) < 0)
@@ -4705,24 +3986,23 @@ H5AC_rsp__dist_md_write__flush_to_min_clean(H5F_t *f,
if(evictions_enabled) {
/* construct candidate list -- process 0 only */
- if(aux_ptr->mpi_rank == 0) {
- if(H5AC_construct_candidate_list(cache_ptr, aux_ptr, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
+ if(aux_ptr->mpi_rank == 0)
+ if(H5AC__construct_candidate_list(cache_ptr, aux_ptr, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't construct candidate list.")
- } /* mpi rank == 0 */
/* propagate and apply candidate list -- all processes */
- if(H5AC_propagate_and_apply_candidate_list(f, dxpl_id, cache_ptr) < 0)
+ if(H5AC__propagate_and_apply_candidate_list(f, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't propagate and apply candidate list.")
} /* evictions enabled */
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_rsp__dist_md_write__flush_to_min_clean() */
+} /* H5AC__rsp__dist_md_write__flush_to_min_clean() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
- * Function: H5AC_rsp__p0_only__flush
+ * Function: H5AC__rsp__p0_only__flush
*
* Purpose: Routine for handling the details of running a sync point
 *              that is triggered by a flush -- which in turn must have been
@@ -4758,67 +4038,70 @@ done:
*-------------------------------------------------------------------------
*/
#ifdef H5_HAVE_PARALLEL
-herr_t
-H5AC_rsp__p0_only__flush(H5F_t *f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr)
+static herr_t
+H5AC__rsp__p0_only__flush(H5F_t *f, hid_t dxpl_id)
{
- int mpi_code;
+ H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
+ int mpi_result;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( f != NULL );
- HDassert( f->shared->cache == cache_ptr );
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(f != NULL);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY );
-
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
/* to prevent "messages from the future" we must
* synchronize all processes before we start the flush.
* Hence the following barrier.
*/
- if(MPI_SUCCESS != (mpi_code = MPI_Barrier(aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed 1", mpi_code)
+ if(MPI_SUCCESS != (mpi_result = MPI_Barrier(aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_result)
/* Flush data to disk, from rank 0 process */
if(aux_ptr->mpi_rank == 0) {
herr_t result;
+ /* Enable writes during this operation */
aux_ptr->write_permitted = TRUE;
+ /* Flush the cache */
result = H5C_flush_cache(f, dxpl_id, dxpl_id, H5AC__NO_FLAGS_SET);
+ /* Disable writes again */
aux_ptr->write_permitted = FALSE;
+ /* Check for error on the write operation */
if(result < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush.")
- if(aux_ptr->write_done != NULL)
+ /* this code exists primarily for the test bed -- it allows us to
+ * enforce posix semantics on the server that pretends to be a
+ * file system in our parallel tests.
+ */
+ if(aux_ptr->write_done)
(aux_ptr->write_done)();
} /* end if */
/* Propagate cleaned entries to other ranks. */
- if(H5AC_propagate_flushed_and_still_clean_entries_list(f, H5AC_dxpl_id, cache_ptr) < 0)
+ if(H5AC__propagate_flushed_and_still_clean_entries_list(f, H5AC_dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't propagate clean entries list.")
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_rsp__p0_only__flush() */
+} /* H5AC__rsp__p0_only__flush() */
#endif /* H5_HAVE_PARALLEL */
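
The process-0-only counterpart follows the same discipline but with a single writer: synchronize, let rank 0 flush with writes enabled, then broadcast the list of now-clean entries so every cache agrees. A sketch, again with invented stand-ins for the cache calls (flush_local_cache() and propagate_clean_list() are not HDF5 APIs):

    #include <mpi.h>

    /* Invented stubs: flush_local_cache() stands in for H5C_flush_cache(),
     * propagate_clean_list() for the clean entries broadcast.
     */
    static int flush_local_cache(void)             { return 0; }
    static int propagate_clean_list(MPI_Comm comm) { (void)comm; return 0; }

    static int p0_only_flush(MPI_Comm comm, int *write_permitted)
    {
        int rank;

        MPI_Comm_rank(comm, &rank);

        /* Synchronize first, so no rank sees "messages from the future". */
        if (MPI_SUCCESS != MPI_Barrier(comm))
            return -1;

        if (rank == 0) {
            /* Under this strategy only rank 0 ever writes metadata. */
            *write_permitted = 1;
            if (flush_local_cache() < 0) {
                *write_permitted = 0;
                return -1;
            }
            *write_permitted = 0;
        }

        /* Every rank joins the clean list broadcast so all caches agree on
         * which entries are now clean.
         */
        return propagate_clean_list(comm);
    }
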
/*-------------------------------------------------------------------------
- * Function: H5AC_rsp__p0_only__flush_to_min_clean
+ * Function: H5AC__rsp__p0_only__flush_to_min_clean
*
* Purpose: Routine for handling the details of running a sync point
* triggered by the accumulation of dirty metadata (as
@@ -4860,29 +4143,25 @@ done:
*-------------------------------------------------------------------------
*/
#ifdef H5_HAVE_PARALLEL
-herr_t
-H5AC_rsp__p0_only__flush_to_min_clean(H5F_t *f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr)
+static herr_t
+H5AC__rsp__p0_only__flush_to_min_clean(H5F_t *f, hid_t dxpl_id)
{
- hbool_t evictions_enabled;
+ H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
+ hbool_t evictions_enabled;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( f != NULL );
- HDassert( f->shared->cache == cache_ptr );
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(f != NULL);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY );
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
/* Query if evictions are allowed */
if(H5C_get_evictions_enabled((const H5C_t *)cache_ptr, &evictions_enabled) < 0)
@@ -4896,14 +4175,14 @@ H5AC_rsp__p0_only__flush_to_min_clean(H5F_t *f,
* Otherwise, do nothing.
*/
if(evictions_enabled) {
- int mpi_code;
+ int mpi_result;
/* to prevent "messages from the future" we must synchronize all
* processes before we start the flush. This synchronization may
* already be done -- hence the do_barrier parameter.
*/
- if(MPI_SUCCESS != (mpi_code = MPI_Barrier(aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_code)
+ if(MPI_SUCCESS != (mpi_result = MPI_Barrier(aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_result)
if(0 == aux_ptr->mpi_rank) {
herr_t result;
@@ -4913,12 +4192,17 @@ H5AC_rsp__p0_only__flush_to_min_clean(H5F_t *f,
* Note that it is quite possible that no entries will be
* flushed.
*/
+
+ /* Enable writes during this operation */
aux_ptr->write_permitted = TRUE;
+ /* Flush the cache */
result = H5C_flush_to_min_clean(f, dxpl_id, H5AC_dxpl_id);
+ /* Disable writes again */
aux_ptr->write_permitted = FALSE;
+ /* Check for error on the write operation */
if(result < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_flush_to_min_clean() failed.")
@@ -4926,22 +4210,22 @@ H5AC_rsp__p0_only__flush_to_min_clean(H5F_t *f,
* to enforce POSIX semantics on the process used to simulate
* reads and writes in t_cache.c.
*/
- if(aux_ptr->write_done != NULL)
+ if(aux_ptr->write_done)
(aux_ptr->write_done)();
} /* end if */
- if(H5AC_propagate_flushed_and_still_clean_entries_list(f, dxpl_id, cache_ptr) < 0)
+ if(H5AC__propagate_flushed_and_still_clean_entries_list(f, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't propagate clean entries list.")
} /* end if */
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_rsp__p0_only__flush_to_min_clean() */
+} /* H5AC__rsp__p0_only__flush_to_min_clean() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
- * Function: H5AC_run_sync_point
+ * Function: H5AC__run_sync_point
*
* Purpose: Top level routine for managing a sync point between all
* meta data caches in the parallel case. Since all caches
@@ -4972,56 +4256,49 @@ done:
*-------------------------------------------------------------------------
*/
#ifdef H5_HAVE_PARALLEL
-herr_t
-H5AC_run_sync_point(H5F_t *f,
- hid_t dxpl_id,
- int sync_point_op)
+static herr_t
+H5AC__run_sync_point(H5F_t *f, hid_t dxpl_id, int sync_point_op)
{
- H5AC_t * cache_ptr;
+ H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( f != NULL );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(f != NULL);
cache_ptr = f->shared->cache;
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
-
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
-
- HDassert( ( sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN ) ||
- ( sync_point_op == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED ) );
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert((sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) ||
+ (sync_point_op == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED));
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
- HDfprintf(stdout,
- "%d:H5AC_propagate...:%u: (u/uu/i/iu/r/ru) = %zu/%u/%zu/%u/%zu/%u\n",
- aux_ptr->mpi_rank,
- aux_ptr->dirty_bytes_propagations,
- aux_ptr->unprotect_dirty_bytes,
- aux_ptr->unprotect_dirty_bytes_updates,
- aux_ptr->insert_dirty_bytes,
- aux_ptr->insert_dirty_bytes_updates,
- aux_ptr->rename_dirty_bytes,
- aux_ptr->rename_dirty_bytes_updates);
+HDfprintf(stdout, "%d:H5AC_propagate...:%u: (u/uu/i/iu/r/ru) = %zu/%u/%zu/%u/%zu/%u\n",
+ aux_ptr->mpi_rank,
+ aux_ptr->dirty_bytes_propagations,
+ aux_ptr->unprotect_dirty_bytes,
+ aux_ptr->unprotect_dirty_bytes_updates,
+ aux_ptr->insert_dirty_bytes,
+ aux_ptr->insert_dirty_bytes_updates,
+ aux_ptr->rename_dirty_bytes,
+ aux_ptr->rename_dirty_bytes_updates);
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
switch(aux_ptr->metadata_write_strategy) {
case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
switch(sync_point_op) {
case H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN:
- if(H5AC_rsp__p0_only__flush_to_min_clean(f, dxpl_id, cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5AC_rsp__p0_only__flush_to_min_clean() failed.")
+ if(H5AC__rsp__p0_only__flush_to_min_clean(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5AC__rsp__p0_only__flush_to_min_clean() failed.")
break;
case H5AC_SYNC_POINT_OP__FLUSH_CACHE:
- if(H5AC_rsp__p0_only__flush(f, dxpl_id, cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5AC_rsp__p0_only__flush() failed.")
+ if(H5AC__rsp__p0_only__flush(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5AC__rsp__p0_only__flush() failed.")
break;
default:
@@ -5033,13 +4310,13 @@ H5AC_run_sync_point(H5F_t *f,
case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
switch(sync_point_op) {
case H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN:
- if(H5AC_rsp__dist_md_write__flush_to_min_clean(f, dxpl_id, cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5AC_rsp__dist_md_write__flush() failed.")
+ if(H5AC__rsp__dist_md_write__flush_to_min_clean(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5AC__rsp__dist_md_write__flush_to_min_clean() failed.")
break;
case H5AC_SYNC_POINT_OP__FLUSH_CACHE:
- if(H5AC_rsp__dist_md_write__flush(f, dxpl_id, cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5AC_rsp__dist_md_write__flush() failed.")
+ if(H5AC__rsp__dist_md_write__flush(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5AC__rsp__dist_md_write__flush() failed.")
break;
default:
@@ -5068,12 +4345,12 @@ H5AC_run_sync_point(H5F_t *f,
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_run_sync_point() */
+} /* H5AC__run_sync_point() */
#endif /* H5_HAVE_PARALLEL */
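
H5AC__run_sync_point() itself is essentially a two-level dispatch: the configured metadata write strategy selects a handler family, and the sync point operation selects the handler within it. Reduced to a skeleton (the enum and handler names here are invented for illustration, not the HDF5 identifiers):

    #include <stdio.h>

    typedef enum { STRATEGY_P0_ONLY, STRATEGY_DISTRIBUTED } write_strategy_t;
    typedef enum { OP_FLUSH_TO_MIN_CLEAN, OP_FLUSH_CACHE }  sync_point_op_t;

    static int p0_flush_to_min_clean(void)   { puts("p0: flush to min clean");   return 0; }
    static int p0_flush(void)                { puts("p0: full flush");           return 0; }
    static int dist_flush_to_min_clean(void) { puts("dist: flush to min clean"); return 0; }
    static int dist_flush(void)              { puts("dist: full flush");         return 0; }

    static int run_sync_point(write_strategy_t strategy, sync_point_op_t op)
    {
        switch (strategy) {
            case STRATEGY_P0_ONLY:
                return (op == OP_FLUSH_TO_MIN_CLEAN) ? p0_flush_to_min_clean() : p0_flush();
            case STRATEGY_DISTRIBUTED:
                return (op == OP_FLUSH_TO_MIN_CLEAN) ? dist_flush_to_min_clean() : dist_flush();
            default:
                return -1;   /* unknown strategy */
        }
    }
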
/*-------------------------------------------------------------------------
- * Function: H5AC_tidy_cache_0_lists()
+ * Function: H5AC__tidy_cache_0_lists()
*
* Purpose: In the distributed metadata write strategy, not all dirty
* entries are written by process 0 -- thus we must tidy
@@ -5109,29 +4386,24 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_tidy_cache_0_lists(H5AC_t * cache_ptr,
- int num_candidates,
- haddr_t * candidates_list_ptr)
-
+H5AC__tidy_cache_0_lists(H5AC_t *cache_ptr, int num_candidates,
+ haddr_t *candidates_list_ptr)
{
- int i;
H5AC_aux_t * aux_ptr;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
+ int i;
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ FUNC_ENTER_STATIC_NOERR
+ /* Sanity checks */
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
- HDassert( aux_ptr->mpi_rank == 0 );
- HDassert( num_candidates > 0 );
- HDassert( candidates_list_ptr != NULL );
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
+ HDassert(aux_ptr->mpi_rank == 0);
+ HDassert(num_candidates > 0);
+ HDassert(candidates_list_ptr != NULL);
/* clean up dirtied and flushed and still clean lists by removing
* all entries on the candidate list. Cleared entries should
@@ -5147,50 +4419,22 @@ H5AC_tidy_cache_0_lists(H5AC_t * cache_ptr,
addr = candidates_list_ptr[i];
- /* addr must be either on the dirtied list, or on the flushed
+ /* addr may be either on the dirtied list, or on the flushed
* and still clean list. Remove it.
*/
- d_slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_search(aux_ptr->d_slist_ptr, (void *)&addr);
- if(d_slist_entry_ptr != NULL) {
- HDassert(d_slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
- HDassert(d_slist_entry_ptr->addr == addr);
-
- if(H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr)) != d_slist_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, "Can't delete entry from dirty entry slist.")
-
- d_slist_entry_ptr->magic = 0;
+ if(NULL != (d_slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(aux_ptr->d_slist_ptr, (void *)&addr)))
d_slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, d_slist_entry_ptr);
-
- aux_ptr->d_slist_len -= 1;
-
- HDassert(aux_ptr->d_slist_len >= 0);
- } /* end if */
-
- c_slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_search(aux_ptr->c_slist_ptr, (void *)&addr);
- if(c_slist_entry_ptr != NULL) {
- HDassert(c_slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
- HDassert(c_slist_entry_ptr->addr == addr);
-
- if(H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr)) != c_slist_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, "Can't delete entry from clean entry slist.")
-
- c_slist_entry_ptr->magic = 0;
+ if(NULL != (c_slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(aux_ptr->c_slist_ptr, (void *)&addr)))
c_slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, c_slist_entry_ptr);
-
- aux_ptr->c_slist_len -= 1;
-
- HDassert( aux_ptr->c_slist_len >= 0 );
- } /* end if */
} /* end for */
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_tidy_cache_0_lists() */
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5AC__tidy_cache_0_lists() */
#endif /* H5_HAVE_PARALLEL */
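
The tidy-up loop above also shows the patch's recurring skip-list simplification: instead of search, assert, remove and a hand-maintained length counter, a single remove call returns the entry (or NULL) and the list length is queried directly when needed. Reusing the mini address-log types and the log_remove() helper from the move-logging sketch earlier (all invented for illustration, not HDF5 calls), the same loop reads roughly as follows:

    /* For each candidate address, drop any matching record from both the
     * dirtied and the flushed-and-still-clean logs kept by rank 0.
     * 'dirty' and 'clean' are addr_log_t instances from the earlier sketch
     * (the clean log simply ignores the byte counter).
     */
    static void tidy_rank0_logs(addr_log_t *dirty, addr_log_t *clean,
                                int n_cand, const my_addr_t *cand)
    {
        int i;

        for (i = 0; i < n_cand; i++) {
            dirty_rec_t *rec;

            /* A single remove call tells us whether the record was present. */
            if (NULL != (rec = log_remove(dirty, cand[i])))
                free(rec);
            if (NULL != (rec = log_remove(clean, cand[i])))
                free(rec);
        }
    }
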
/*-------------------------------------------------------------------------
- * Function: H5AC_flush_entries
+ * Function: H5AC__flush_entries
*
* Purpose: Flush the metadata cache associated with the specified file,
* only writing from rank 0, but propagating the cleaned entries
@@ -5206,25 +4450,25 @@ done:
*-------------------------------------------------------------------------
*/
#ifdef H5_HAVE_PARALLEL
-herr_t
-H5AC_flush_entries(H5F_t *f)
+static herr_t
+H5AC__flush_entries(H5F_t *f, hid_t dxpl_id)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
HDassert(f);
HDassert(f->shared->cache);
/* Check if we have >1 ranks */
- if(f->shared->cache->aux_ptr) {
- if(H5AC_run_sync_point(f, H5AC_dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_CACHE) < 0)
+ if(f->shared->cache->aux_ptr)
+ if(H5AC__run_sync_point(f, dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_CACHE) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point.")
- } /* end if */
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_flush_entries() */
+} /* H5AC__flush_entries() */
#endif /* H5_HAVE_PARALLEL */
@@ -5244,15 +4488,13 @@ done:
*------------------------------------------------------------------------------
*/
herr_t
-H5AC_ignore_tags(H5F_t * f)
+H5AC_ignore_tags(const H5F_t *f)
{
- /* Variable Declarations */
herr_t ret_value = SUCCEED;
- /* Function Enter Macro */
FUNC_ENTER_NOAPI(FAIL)
- /* Assertions */
+ /* Sanity checks */
HDassert(f);
HDassert(f->shared);
HDassert(f->shared->cache);
@@ -5279,28 +4521,25 @@ done:
*------------------------------------------------------------------------------
*/
herr_t
-H5AC_tag(hid_t dxpl_id, haddr_t metadata_tag, haddr_t * prev_tag)
+H5AC_tag(hid_t dxpl_id, haddr_t metadata_tag, haddr_t *prev_tag)
{
- /* Variable Declarations */
- H5P_genplist_t *dxpl; /* dataset transfer property list */
+ H5P_genplist_t *dxpl; /* dataset transfer property list */
herr_t ret_value = SUCCEED;
- /* Function Enter Macro */
FUNC_ENTER_NOAPI(FAIL)
/* Check Arguments */
if(NULL == (dxpl = (H5P_genplist_t *)H5I_object_verify(dxpl_id, H5I_GENPROP_LST)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a property list")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, FAIL, "not a property list")
- /* Get the current tag value and return that (if prev_tag is NOT null)*/
- if(prev_tag) {
+ /* Get the current tag value and return that (if prev_tag is NOT null) */
+ if(prev_tag)
if((H5P_get(dxpl, "H5AC_metadata_tag", prev_tag)) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to query dxpl")
- } /* end if */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "unable to query dxpl")
/* Set the provided tag value in the dxpl_id. */
if(H5P_set(dxpl, "H5AC_metadata_tag", &metadata_tag) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set property in dxpl")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "can't set property in dxpl")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -5322,21 +4561,17 @@ done:
*------------------------------------------------------------------------------
*/
herr_t
-H5AC_retag_copied_metadata(H5F_t * f, haddr_t metadata_tag)
+H5AC_retag_copied_metadata(const H5F_t *f, haddr_t metadata_tag)
{
- herr_t ret_value = SUCCEED;
-
- /* Function Enter Macro */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
- /* Assertions */
+ /* Sanity checks */
HDassert(f);
HDassert(f->shared);
/* Call cache-level function to retag entries */
H5C_retag_copied_metadata(f->shared->cache, metadata_tag);
-done:
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5AC_retag_copied_metadata */
diff --git a/src/H5ACpkg.h b/src/H5ACpkg.h
index 5b8ac86..950bdea 100644
--- a/src/H5ACpkg.h
+++ b/src/H5ACpkg.h
@@ -297,11 +297,6 @@
 *             To reiterate, this field is only used on process 0 -- it
* should be NULL on all other processes.
*
- * d_slist_len: Integer field containing the number of entries in the
- * dirty entry list. This field should always contain the
- * value 0 on all processes other than process 0. It exists
- * primarily for sanity checking.
- *
* c_slist_ptr: Pointer to an instance of H5SL_t used to maintain a list
* of entries that were dirty, have been flushed
* to disk since the last clean entries broadcast, and are
@@ -312,11 +307,6 @@
* the next clean entries broadcast. The list emptied after
* each broadcast.
*
- * c_slist_len: Integer field containing the number of entries in the clean
- * entries list (*c_slist_ptr). This field should always
- * contain the value 0 on all processes other than process 0.
- * It exists primarily for sanity checking.
- *
* The following two fields are used only when metadata_write_strategy
* is H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED.
*
@@ -325,9 +315,6 @@
* point. This list is then broadcast to the other processes,
* which then either flush or mark clean all entries on it.
*
- * candidate_slist_len: Integer field containing the number of entries on the
- * candidate list. It exists primarily for sanity checking.
- *
* write_done: In the parallel test bed, it is necessary to ensure that
* all writes to the server process from cache 0 complete
* before it enters the barrier call with the other caches.
@@ -394,16 +381,10 @@ typedef struct H5AC_aux_t
H5SL_t * d_slist_ptr;
- int32_t d_slist_len;
-
H5SL_t * c_slist_ptr;
- int32_t c_slist_len;
-
H5SL_t * candidate_slist_ptr;
- int32_t candidate_slist_len;
-
void (* write_done)(void);
void (* sync_point_done)(int num_writes,
diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h
index e259a24..a4e7c5a 100644
--- a/src/H5ACprivate.h
+++ b/src/H5ACprivate.h
@@ -363,8 +363,6 @@ H5_DLL herr_t H5AC_set_sync_point_done_callback(H5C_t *cache_ptr,
void (*sync_point_done)(int num_writes, haddr_t *written_entries_tbl));
H5_DLL herr_t H5AC_set_write_done_callback(H5C_t * cache_ptr,
void (* write_done)(void));
-H5_DLL herr_t H5AC_stats(const H5F_t *f);
-H5_DLL herr_t H5AC_dump_cache(const H5F_t *f);
H5_DLL herr_t H5AC_get_cache_auto_resize_config(const H5AC_t * cache_ptr,
H5AC_cache_config_t *config_ptr);
H5_DLL herr_t H5AC_get_cache_size(H5AC_t *cache_ptr, size_t *max_size_ptr,
@@ -374,27 +372,24 @@ H5_DLL herr_t H5AC_reset_cache_hit_rate_stats(H5AC_t *cache_ptr);
H5_DLL herr_t H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr,
H5AC_cache_config_t *config_ptr);
H5_DLL herr_t H5AC_validate_config(H5AC_cache_config_t *config_ptr);
-
H5_DLL herr_t H5AC_close_trace_file(H5AC_t *cache_ptr);
H5_DLL herr_t H5AC_open_trace_file(H5AC_t *cache_ptr, const char *trace_file_name);
-H5_DLL herr_t H5AC_tag(hid_t dxpl_id, haddr_t metadata_tag, haddr_t * prev_tag);
-H5_DLL herr_t H5AC_retag_copied_metadata(H5F_t * f, haddr_t metadata_tag);
-H5_DLL herr_t H5AC_ignore_tags(H5F_t * f);
+H5_DLL herr_t H5AC_tag(hid_t dxpl_id, haddr_t metadata_tag, haddr_t *prev_tag);
+H5_DLL herr_t H5AC_retag_copied_metadata(const H5F_t *f, haddr_t metadata_tag);
+H5_DLL herr_t H5AC_ignore_tags(const H5F_t *f);
#ifdef H5_HAVE_PARALLEL
H5_DLL herr_t H5AC_add_candidate(H5AC_t * cache_ptr, haddr_t addr);
#endif /* H5_HAVE_PARALLEL */
#ifndef NDEBUG /* debugging functions */
-
+H5_DLL herr_t H5AC_stats(const H5F_t *f);
+H5_DLL herr_t H5AC_dump_cache(const H5F_t *f);
H5_DLL herr_t H5AC_get_entry_ptr_from_addr(const H5F_t *f, haddr_t addr,
- void ** entry_ptr_ptr);
-
+ void ** entry_ptr_ptr);
H5_DLL herr_t H5AC_verify_entry_type(const H5F_t * f, haddr_t addr,
- const H5AC_class_t * expected_type,
- hbool_t * in_cache_ptr,
- hbool_t * type_ok_ptr);
-
+ const H5AC_class_t * expected_type, hbool_t * in_cache_ptr,
+ hbool_t * type_ok_ptr);
#endif /* NDEBUG */ /* end debugging functions */
#endif /* !_H5ACprivate_H */
diff --git a/src/H5C.c b/src/H5C.c
index a5eaa93..5cdd65f 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -1718,10 +1718,10 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign
H5C_cache_entry_t * entry_ptr = NULL;
H5C_cache_entry_t * next_entry_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
- int64_t flushed_entries_count;
- size_t flushed_entries_size;
- int64_t initial_slist_len;
- size_t initial_slist_size;
+ int64_t flushed_entries_count = 0;
+ int64_t flushed_entries_size = 0;
+ int64_t initial_slist_len = 0;
+ size_t initial_slist_size = 0;
#endif /* H5C_DO_SANITY_CHECKS */
FUNC_ENTER_NOAPI(FAIL)
@@ -1943,7 +1943,7 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign
if(entry_ptr->flush_dep_height == curr_flush_dep_height ) {
#if H5C_DO_SANITY_CHECKS
flushed_entries_count++;
- flushed_entries_size += entry_ptr->size;
+ flushed_entries_size += (int64_t)entry_ptr->size;
#endif /* H5C_DO_SANITY_CHECKS */
status = H5C_flush_single_entry(f,
primary_dxpl_id,
@@ -1976,7 +1976,7 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign
if(entry_ptr->flush_dep_height == curr_flush_dep_height ) {
#if H5C_DO_SANITY_CHECKS
flushed_entries_count++;
- flushed_entries_size += entry_ptr->size;
+ flushed_entries_size += (int64_t)entry_ptr->size;
#endif /* H5C_DO_SANITY_CHECKS */
status = H5C_flush_single_entry(f,
primary_dxpl_id,
@@ -2027,8 +2027,8 @@ end_of_inner_loop:
HDassert( (initial_slist_len + cache_ptr->slist_len_increase -
flushed_entries_count) == cache_ptr->slist_len );
- HDassert( (initial_slist_size +
- (size_t)(cache_ptr->slist_size_increase) -
+ HDassert( (size_t)((int64_t)initial_slist_size +
+ cache_ptr->slist_size_increase -
flushed_entries_size) == cache_ptr->slist_size );
#endif /* H5C_DO_SANITY_CHECKS */
@@ -2127,9 +2127,7 @@ H5C_flush_to_min_clean(H5F_t * f,
if ( cache_ptr->check_write_permitted != NULL ) {
- result = (cache_ptr->check_write_permitted)(f,
- primary_dxpl_id,
- &write_permitted);
+ result = (cache_ptr->check_write_permitted)(f, &write_permitted);
if ( result < 0 ) {
@@ -2569,25 +2567,23 @@ done:
* file logging is turned off), or contain a pointer to the
* open file to which trace file data is to be written.
*
- * Return: Non-negative on success/Negative on failure
+ * Return:      Trace file pointer (may be NULL; can't fail)
*
* Programmer: John Mainzer
* 1/20/06
*
*-------------------------------------------------------------------------
*/
-herr_t
-H5C_get_trace_file_ptr(const H5C_t *cache_ptr, FILE **trace_file_ptr_ptr)
+FILE *
+H5C_get_trace_file_ptr(const H5C_t *cache_ptr)
{
FUNC_ENTER_NOAPI_NOERR
+ /* Check arguments */
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(trace_file_ptr_ptr);
-
- *trace_file_ptr_ptr = cache_ptr->trace_file_ptr;
- FUNC_LEAVE_NOAPI(SUCCEED)
+ FUNC_LEAVE_NOAPI(cache_ptr->trace_file_ptr)
} /* H5C_get_trace_file_ptr() */
@@ -2600,16 +2596,15 @@ H5C_get_trace_file_ptr(const H5C_t *cache_ptr, FILE **trace_file_ptr_ptr)
* file logging is turned off), or contain a pointer to the
* open file to which trace file data is to be written.
*
- * Return: Non-negative on success/Negative on failure
+ * Return:      Trace file pointer (may be NULL; can't fail)
*
* Programmer: Quincey Koziol
* 6/9/08
*
*-------------------------------------------------------------------------
*/
-herr_t
-H5C_get_trace_file_ptr_from_entry(const H5C_cache_entry_t *entry_ptr,
- FILE **trace_file_ptr_ptr)
+FILE *
+H5C_get_trace_file_ptr_from_entry(const H5C_cache_entry_t *entry_ptr)
{
FUNC_ENTER_NOAPI_NOERR
@@ -2617,9 +2612,7 @@ H5C_get_trace_file_ptr_from_entry(const H5C_cache_entry_t *entry_ptr,
HDassert(entry_ptr);
HDassert(entry_ptr->cache_ptr);
- H5C_get_trace_file_ptr(entry_ptr->cache_ptr, trace_file_ptr_ptr);
-
- FUNC_LEAVE_NOAPI(SUCCEED)
+ FUNC_LEAVE_NOAPI(H5C_get_trace_file_ptr(entry_ptr->cache_ptr))
} /* H5C_get_trace_file_ptr_from_entry() */
@@ -2820,9 +2813,7 @@ H5C_insert_entry(H5F_t * f,
if ( cache_ptr->check_write_permitted != NULL ) {
- result = (cache_ptr->check_write_permitted)(f,
- primary_dxpl_id,
- &write_permitted);
+ result = (cache_ptr->check_write_permitted)(f, &write_permitted);
if ( result < 0 ) {
@@ -3934,9 +3925,7 @@ H5C_protect(H5F_t * f,
if ( cache_ptr->check_write_permitted != NULL ) {
- result = (cache_ptr->check_write_permitted)(f,
- primary_dxpl_id,
- &write_permitted);
+ result = (cache_ptr->check_write_permitted)(f, &write_permitted);
if ( result < 0 ) {
@@ -4083,9 +4072,7 @@ H5C_protect(H5F_t * f,
if ( cache_ptr->check_write_permitted != NULL ) {
- result = (cache_ptr->check_write_permitted)(f,
- primary_dxpl_id,
- &write_permitted);
+ result = (cache_ptr->check_write_permitted)(f, &write_permitted);
if ( result < 0 ) {
@@ -4660,8 +4647,6 @@ H5C_stats(H5C_t * cache_ptr,
#endif /* H5C_COLLECT_CACHE_STATS */
display_detailed_stats)
{
- herr_t ret_value = SUCCEED; /* Return value */
-
#if H5C_COLLECT_CACHE_STATS
int i;
int64_t total_hits = 0;
@@ -4693,11 +4678,12 @@ H5C_stats(H5C_t * cache_ptr,
size_t aggregate_max_size = 0;
int32_t aggregate_max_pins = 0;
double hit_rate;
- double average_successful_search_depth = 0.0;
- double average_failed_search_depth = 0.0;
- double average_entries_skipped_per_calls_to_msic = 0.0;
- double average_entries_scanned_per_calls_to_msic = 0.0;
+ double average_successful_search_depth = 0.0f;
+ double average_failed_search_depth = 0.0f;
+ double average_entries_skipped_per_calls_to_msic = 0.0f;
+ double average_entries_scanned_per_calls_to_msic = 0.0f;
#endif /* H5C_COLLECT_CACHE_STATS */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -4761,10 +4747,10 @@ H5C_stats(H5C_t * cache_ptr,
if ( ( total_hits > 0 ) || ( total_misses > 0 ) ) {
- hit_rate = 100.0 * ((double)(total_hits)) /
+ hit_rate = (double)100.0f * ((double)(total_hits)) /
((double)(total_hits + total_misses));
} else {
- hit_rate = 0.0;
+ hit_rate = 0.0f;
}
if ( cache_ptr->successful_ht_searches > 0 ) {
@@ -4931,7 +4917,7 @@ H5C_stats(H5C_t * cache_ptr,
HDfprintf(stdout, "%s MSIC: Average/max entries skipped = %lf / %ld\n",
cache_ptr->prefix,
- (float)average_entries_skipped_per_calls_to_msic,
+ (double)average_entries_skipped_per_calls_to_msic,
(long)(cache_ptr->max_entries_skipped_in_msic));
if (cache_ptr->calls_to_msic > 0) {
@@ -4942,7 +4928,7 @@ H5C_stats(H5C_t * cache_ptr,
HDfprintf(stdout, "%s MSIC: Average/max entries scanned = %lf / %ld\n",
cache_ptr->prefix,
- (float)average_entries_scanned_per_calls_to_msic,
+ (double)average_entries_scanned_per_calls_to_msic,
(long)(cache_ptr->max_entries_scanned_in_msic));
HDfprintf(stdout, "%s MSIC: Scanned to make space(evict) = %lld\n",
@@ -4986,10 +4972,10 @@ H5C_stats(H5C_t * cache_ptr,
if ( ( cache_ptr->hits[i] > 0 ) || ( cache_ptr->misses[i] > 0 ) ) {
- hit_rate = 100.0 * ((double)(cache_ptr->hits[i])) /
+ hit_rate = (double)100.0f * ((double)(cache_ptr->hits[i])) /
((double)(cache_ptr->hits[i] + cache_ptr->misses[i]));
} else {
- hit_rate = 0.0;
+ hit_rate = 0.0f;
}
HDfprintf(stdout,
@@ -8242,7 +8228,6 @@ H5C_flush_single_entry(H5F_t * f,
hbool_t was_dirty;
hbool_t destroy_entry;
herr_t status;
- int type_id;
unsigned flush_flags = H5C_CALLBACK__NO_FLAGS_SET;
H5C_cache_entry_t * entry_ptr = NULL;
herr_t ret_value = SUCCEED; /* Return value */
@@ -8343,7 +8328,6 @@ H5C_flush_single_entry(H5F_t * f,
#endif /* H5_HAVE_PARALLEL */
was_dirty = entry_ptr->is_dirty;
- type_id = entry_ptr->type->id;
entry_ptr->flush_marker = FALSE;
@@ -8611,8 +8595,7 @@ H5C_flush_single_entry(H5F_t * f,
if ( cache_ptr->log_flush ) {
- status = (cache_ptr->log_flush)(cache_ptr, addr, was_dirty,
- flags, type_id);
+ status = (cache_ptr->log_flush)(cache_ptr, addr, was_dirty, flags);
if ( status < 0 ) {
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index 7c278e8..5df84cd 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -35,14 +35,16 @@
#ifndef _H5Cpkg_H
#define _H5Cpkg_H
-
/* Get package's private header */
#include "H5Cprivate.h"
-
-/* Get needed headers */
+/* Other private headers needed by this file */
#include "H5SLprivate.h" /* Skip lists */
+/**************************/
+/* Package Private Macros */
+/**************************/
+
/* With the introduction of the fractal heap, it is now possible for
* entries to be dirtied, resized, and/or moved in the flush callbacks.
* As a result, on flushes, it may be necessary to make multiple passes
@@ -53,980 +55,14 @@
*
* -- JRM
*/
-
#define H5C__MAX_PASSES_ON_FLUSH 4
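
The comment above explains why a single flush may require several passes:
flush callbacks can dirty, resize, or move entries while the flush is in
progress, so the cache retries up to H5C__MAX_PASSES_ON_FLUSH times. A
hedged sketch of that bounded-retry idea (not the actual H5C_flush_cache()
logic; the per-pass flushing is elided as a comment):

    int32_t passes = 0;
    hbool_t entries_remain = (hbool_t)(cache_ptr->slist_len > 0);

    while ( entries_remain && ( passes < H5C__MAX_PASSES_ON_FLUSH ) ) {
        /* flush every entry currently on the skip list here -- the flush
         * callbacks may place new dirty entries back on the list */
        passes++;
        entries_remain = (hbool_t)(cache_ptr->slist_len > 0);
    }
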
-
-
-/****************************************************************************
- *
- * structure H5C_t
- *
- * Catchall structure for all variables specific to an instance of the cache.
- *
- * While the individual fields of the structure are discussed below, the
- * following overview may be helpful.
- *
- * Entries in the cache are stored in an instance of H5TB_TREE, indexed on
- * the entry's disk address. While the H5TB_TREE is less efficient than
- * hash table, it keeps the entries in address sorted order. As flushes
- * in parallel mode are more efficient if they are issued in increasing
- * address order, this is a significant benefit. Also the H5TB_TREE code
- * was readily available, which reduced development time.
- *
- * While the cache was designed with multiple replacement policies in mind,
- * at present only a modified form of LRU is supported.
- *
- * JRM - 4/26/04
- *
- * Profiling has indicated that searches in the instance of H5TB_TREE are
- * too expensive. To deal with this issue, I have augmented the cache
- * with a hash table in which all entries will be stored. Given the
- * advantages of flushing entries in increasing address order, the TBBT
- * is retained, but only dirty entries are stored in it. At least for
- * now, we will leave entries in the TBBT after they are flushed.
- *
- * Note that index_size and index_len now refer to the total size of
- * and number of entries in the hash table.
- *
- * JRM - 7/19/04
- *
- * The TBBT has since been replaced with a skip list. This change
- * greatly predates this note.
- *
- * JRM - 9/26/05
- *
- * magic: Unsigned 32 bit integer always set to H5C__H5C_T_MAGIC.
- * This field is used to validate pointers to instances of
- * H5C_t.
- *
- * flush_in_progress: Boolean flag indicating whether a flush is in
- * progress.
- *
- * trace_file_ptr: File pointer pointing to the trace file, which is used
- * to record cache operations for use in simulations and design
- * studies. This field will usually be NULL, indicating that
- * no trace file should be recorded.
- *
- * Since much of the code supporting the parallel metadata
- * cache is in H5AC, we don't write the trace file from
- * H5C. Instead, H5AC reads the trace_file_ptr as needed.
- *
- * When we get to using H5C in other places, we may add
- * code to write trace file data at the H5C level as well.
- *
- * aux_ptr: Pointer to void used to allow wrapper code to associate
- * its data with an instance of H5C_t. The H5C cache code
- * sets this field to NULL, and otherwise leaves it alone.
- *
- * max_type_id: Integer field containing the maximum type id number assigned
- * to a type of entry in the cache. All type ids from 0 to
- * max_type_id inclusive must be defined. The names of the
- * types are stored in the type_name_table discussed below, and
- * indexed by the ids.
- *
- * type_name_table_ptr: Pointer to an array of pointer to char of length
- * max_type_id + 1. The strings pointed to by the entries
- * in the array are the names of the entry types associated
- * with the indexing type IDs.
- *
- * max_cache_size: Nominal maximum number of bytes that may be stored in the
- * cache. This value should be viewed as a soft limit, as the
- * cache can exceed this value under the following circumstances:
- *
- * a) All entries in the cache are protected, and the cache is
- * asked to insert a new entry. In this case the new entry
- * will be created. If this causes the cache to exceed
- * max_cache_size, it will do so. The cache will attempt
- * to reduce its size as entries are unprotected.
- *
- * b) When running in parallel mode, the cache may not be
- * permitted to flush a dirty entry in response to a read.
- * If there are no clean entries available to evict, the
- * cache will exceed its maximum size. Again the cache
- * will attempt to reduce its size to the max_cache_size
- * limit on the next cache write.
- *
- * c) When an entry increases in size, the cache may exceed
- * the max_cache_size limit until the next time the cache
- * attempts to load or insert an entry.
- *
- * min_clean_size: Nominal minimum number of clean bytes in the cache.
- * The cache attempts to maintain this number of bytes of
- * clean data so as to avoid case b) above. Again, this is
- * a soft limit.
- *
- *
- * In addition to the call back functions required for each entry, the
- * cache requires the following call back functions for this instance of
- * the cache as a whole:
- *
- * check_write_permitted: In certain applications, the cache may not
- * be allowed to write to disk at certain time. If specified,
- * the check_write_permitted function is used to determine if
- * a write is permissible at any given point in time.
- *
- * If no such function is specified (i.e. this field is NULL),
- * the cache uses the following write_permitted field to
- * determine whether writes are permitted.
- *
- * write_permitted: If check_write_permitted is NULL, this boolean flag
- * indicates whether writes are permitted.
- *
- * log_flush: If provided, this function is called whenever a dirty
- * entry is flushed to disk.
- *
- *
- * In cases where memory is plentiful, and performance is an issue, it may
- * be useful to disable all cache evictions, and thereby postpone metadata
- * writes. The following field is used to implement this.
- *
- * evictions_enabled: Boolean flag that is initialized to TRUE. When
- * this flag is set to FALSE, the metadata cache will not
- * attempt to evict entries to make space for newly protected
- * entries, and instead the will grow without limit.
- *
- * Needless to say, this feature must be used with care.
- *
- *
- * The cache requires an index to facilitate searching for entries. The
- * following fields support that index.
- *
- * index_len: Number of entries currently in the hash table used to index
- * the cache.
- *
- * index_size: Number of bytes of cache entries currently stored in the
- * hash table used to index the cache.
- *
- * This value should not be mistaken for footprint of the
- * cache in memory. The average cache entry is small, and
- * the cache has a considerable overhead. Multiplying the
- * index_size by two should yield a conservative estimate
- * of the cache's memory footprint.
- *
- * clean_index_size: Number of bytes of clean entries currently stored in
- * the hash table. Note that the index_size field (above)
- * is also the sum of the sizes of all entries in the cache.
- * Thus we should have the invarient that clean_index_size +
- * dirty_index_size == index_size.
- *
- * WARNING:
- *
- * 1) The clean_index_size field is not maintained by the
- * index macros, as the hash table doesn't care whether
- * the entry is clean or dirty. Instead the field is
- * maintained in the H5C__UPDATE_RP macros.
- *
- * 2) The value of the clean_index_size must not be mistaken
- * for the current clean size of the cache. Rather, the
- * clean size of the cache is the current value of
- * clean_index_size plus the amount of empty space (if any)
- * in the cache.
- *
- * dirty_index_size: Number of bytes of dirty entries currently stored in
- * the hash table. Note that the index_size field (above)
- * is also the sum of the sizes of all entries in the cache.
- * Thus we should have the invarient that clean_index_size +
- * dirty_index_size == index_size.
- *
- * WARNING:
- *
- * 1) The dirty_index_size field is not maintained by the
- * index macros, as the hash table doesn't care whether
- * the entry is clean or dirty. Instead the field is
- * maintained in the H5C__UPDATE_RP macros.
- *
- * index: Array of pointer to H5C_cache_entry_t of size
- * H5C__HASH_TABLE_LEN. At present, this value is a power
- * of two, not the usual prime number.
- *
- * I hope that the variable size of cache elements, the large
- * hash table size, and the way in which HDF5 allocates space
- * will combine to avoid problems with periodicity. If so, we
- * can use a trivial hash function (a bit-and and a 3 bit left
- * shift) with some small savings.
- *
- * If not, it will become evident in the statistics. Changing
- * to the usual prime number length hash table will require
- * changing the H5C__HASH_FCN macro and the deletion of the
- * H5C__HASH_MASK #define. No other changes should be required.
- *
- *
- * When we flush the cache, we need to write entries out in increasing
- * address order. An instance of a skip list is used to store dirty entries in
- * sorted order. Whether it is cheaper to sort the dirty entries as needed,
- * or to maintain the list is an open question. At a guess, it depends
- * on how frequently the cache is flushed. We will see how it goes.
- *
- * For now at least, I will not remove dirty entries from the list as they
- * are flushed. (this has been changed -- dirty entries are now removed from
- * the skip list as they are flushed. JRM - 10/25/05)
- *
- * slist_len: Number of entries currently in the skip list
- * used to maintain a sorted list of dirty entries in the
- * cache.
- *
- * slist_size: Number of bytes of cache entries currently stored in the
- * skip list used to maintain a sorted list of
- * dirty entries in the cache.
- *
- * slist_ptr: pointer to the instance of H5SL_t used maintain a sorted
- * list of dirty entries in the cache. This sorted list has
- * two uses:
- *
- * a) It allows us to flush dirty entries in increasing address
- * order, which results in significant savings.
- *
- * b) It facilitates checking for adjacent dirty entries when
- * attempting to evict entries from the cache. While we
- * don't use this at present, I hope that this will allow
- * some optimizations when I get to it.
- *
- * num_last_entries: The number of entries in the cache that can only be
- * flushed after all other entries in the cache have
- * been flushed. At this time, this will only ever be
- * one entry (the superblock), and the code has been
- * protected with HDasserts to enforce this. This restraint
- * can certainly be relaxed in the future if the need for
- * multiple entries being flushed last arises, though
- * explicit tests for that case should be added when said
- * HDasserts are removed.
- *
- * With the addition of the fractal heap, the cache must now deal with
- * the case in which entries may be dirtied, moved, or have their sizes
- * changed during a flush. To allow sanity checks in this situation, the
- * following two fields have been added. They are only compiled in when
- * H5C_DO_SANITY_CHECKS is TRUE.
- *
- * slist_len_increase: Number of entries that have been added to the
- * slist since the last time this field was set to zero.
- *
- * slist_size_increase: Total size of all entries that have been added
- * to the slist since the last time this field was set to
- * zero.
- *
- *
- * When a cache entry is protected, it must be removed from the LRU
- * list(s) as it cannot be either flushed or evicted until it is unprotected.
- * The following fields are used to implement the protected list (pl).
- *
- * pl_len: Number of entries currently residing on the protected list.
- *
- * pl_size: Number of bytes of cache entries currently residing on the
- * protected list.
- *
- * pl_head_ptr: Pointer to the head of the doubly linked list of protected
- * entries. Note that cache entries on this list are linked
- * by their next and prev fields.
- *
- * This field is NULL if the list is empty.
- *
- * pl_tail_ptr: Pointer to the tail of the doubly linked list of protected
- * entries. Note that cache entries on this list are linked
- * by their next and prev fields.
- *
- * This field is NULL if the list is empty.
- *
- *
- * For very frequently used entries, the protect/unprotect overhead can
- * become burdensome. To avoid this overhead, I have modified the cache
- * to allow entries to be "pinned". A pinned entry is similar to a
- * protected entry, in the sense that it cannot be evicted, and that
- * the entry can be modified at any time.
- *
- * Pinning an entry has the following implications:
- *
- * 1) A pinned entry cannot be evicted. Thus unprotected
- * pinned entries reside in the pinned entry list, instead
- * of the LRU list(s) (or other lists maintained by the current
- * replacement policy code).
- *
- * 2) A pinned entry can be accessed or modified at any time.
- * Therefore, the cache must check with the entry owner
- * before flushing it. If permission is denied, the
- * cache just skips the entry in the flush.
- *
- * 3) A pinned entry can be marked as dirty (and possibly
- * change size) while it is unprotected.
- *
- * 4) The flush-destroy code must allow pinned entries to
- * be unpinned (and possibly unprotected) during the
- * flush.
- *
- * Since pinned entries cannot be evicted, they must be kept on a pinned
- * entry list (pel), instead of being entrusted to the replacement policy
- * code.
- *
- * Maintaining the pinned entry list requires the following fields:
- *
- * pel_len: Number of entries currently residing on the pinned
- * entry list.
- *
- * pel_size: Number of bytes of cache entries currently residing on
- * the pinned entry list.
- *
- * pel_head_ptr: Pointer to the head of the doubly linked list of pinned
- * but not protected entries. Note that cache entries on
- * this list are linked by their next and prev fields.
- *
- * This field is NULL if the list is empty.
- *
- * pel_tail_ptr: Pointer to the tail of the doubly linked list of pinned
- * but not protected entries. Note that cache entries on
- * this list are linked by their next and prev fields.
- *
- * This field is NULL if the list is empty.
- *
- *
- * The cache must have a replacement policy, and the fields supporting this
- * policy must be accessible from this structure.
- *
- * While there has been interest in several replacement policies for
- * this cache, the initial development schedule is tight. Thus I have
- * elected to support only a modified LRU (least recently used) policy
- * for the first cut.
- *
- * To further simplify matters, I have simply included the fields needed
- * by the modified LRU in this structure. When and if we add support for
- * other policies, it will probably be easiest to just add the necessary
- * fields to this structure as well -- we only create one instance of this
- * structure per file, so the overhead is not excessive.
- *
- *
- * Fields supporting the modified LRU policy:
- *
- * See most any OS text for a discussion of the LRU replacement policy.
- *
- * When operating in parallel mode, we must ensure that a read does not
- * cause a write. If it does, the process will hang, as the write will
- * be collective and the other processes will not know to participate.
- *
- * To deal with this issue, I have modified the usual LRU policy by adding
- * clean and dirty LRU lists to the usual LRU list.
- *
- * The clean LRU list is simply the regular LRU list with all dirty cache
- * entries removed.
- *
- * Similarly, the dirty LRU list is the regular LRU list with all the clean
- * cache entries removed.
- *
- * When reading in parallel mode, we evict from the clean LRU list only.
- * This implies that we must try to ensure that the clean LRU list is
- * reasonably well stocked at all times.
- *
- * We attempt to do this by trying to flush enough entries on each write
- * to keep the cLRU_list_size >= min_clean_size.
- *
- * Even if we start with a completely clean cache, a sequence of protects
- * without unprotects can empty the clean LRU list. In this case, the
- * cache must grow temporarily. At the next write, we will attempt to
- * evict enough entries to reduce index_size to less than max_cache_size.
- * While this will usually be possible, all bets are off if enough entries
- * are protected.
- *
- * Discussions of the individual fields used by the modified LRU replacement
- * policy follow:
- *
- * LRU_list_len: Number of cache entries currently on the LRU list.
- *
- * Observe that LRU_list_len + pl_len must always equal
- * index_len.
- *
- * LRU_list_size: Number of bytes of cache entries currently residing on the
- * LRU list.
- *
- * Observe that LRU_list_size + pl_size must always equal
- * index_size.
- *
- * LRU_head_ptr: Pointer to the head of the doubly linked LRU list. Cache
- * entries on this list are linked by their next and prev fields.
- *
- * This field is NULL if the list is empty.
- *
- * LRU_tail_ptr: Pointer to the tail of the doubly linked LRU list. Cache
- * entries on this list are linked by their next and prev fields.
- *
- * This field is NULL if the list is empty.
- *
- * cLRU_list_len: Number of cache entries currently on the clean LRU list.
- *
- * Observe that cLRU_list_len + dLRU_list_len must always
- * equal LRU_list_len.
- *
- * cLRU_list_size: Number of bytes of cache entries currently residing on
- * the clean LRU list.
- *
- * Observe that cLRU_list_size + dLRU_list_size must always
- * equal LRU_list_size.
- *
- * cLRU_head_ptr: Pointer to the head of the doubly linked clean LRU list.
- * Cache entries on this list are linked by their aux_next and
- * aux_prev fields.
- *
- * This field is NULL if the list is empty.
- *
- * cLRU_tail_ptr: Pointer to the tail of the doubly linked clean LRU list.
- * Cache entries on this list are linked by their aux_next and
- * aux_prev fields.
- *
- * This field is NULL if the list is empty.
- *
- * dLRU_list_len: Number of cache entries currently on the dirty LRU list.
- *
- * Observe that cLRU_list_len + dLRU_list_len must always
- * equal LRU_list_len.
- *
- * dLRU_list_size: Number of cache entries currently on the dirty LRU list.
- *
- * Observe that cLRU_list_len + dLRU_list_len must always
- * equal LRU_list_len.
- *
- * dLRU_head_ptr: Pointer to the head of the doubly linked dirty LRU list.
- * Cache entries on this list are linked by their aux_next and
- * aux_prev fields.
- *
- * This field is NULL if the list is empty.
- *
- * dLRU_tail_ptr: Pointer to the tail of the doubly linked dirty LRU list.
- * Cache entries on this list are linked by their aux_next and
- * aux_prev fields.
- *
- * This field is NULL if the list is empty.
- *
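
The modified-LRU notes above boil down to: in parallel mode a read must
never force a (collective) write, so eviction on read draws from the clean
LRU list only. A hypothetical helper illustrating the idea -- it uses the
cLRU/aux link fields documented in this block, but is not the real
H5C_make_space_in_cache() logic:

    static size_t
    evict_from_clean_lru(H5C_t * cache_ptr, size_t space_needed)
    {
        size_t              freed = 0;
        H5C_cache_entry_t * entry_ptr = cache_ptr->cLRU_tail_ptr;

        while ( ( entry_ptr != NULL ) && ( freed < space_needed ) ) {
            H5C_cache_entry_t * prev_ptr = entry_ptr->aux_prev;

            HDassert( ! entry_ptr->is_dirty );
            freed += entry_ptr->size;
            /* a real implementation would evict (destroy without
             * writing) the entry here */
            entry_ptr = prev_ptr;
        }

        return(freed);
    }
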
- *
- * Automatic cache size adjustment:
- *
- * While the default cache size is adequate for most cases, we can run into
- * cases where the default is too small. Ideally, we will let the user
- * adjust the cache size as required. However, this is not possible in all
- * cases. Thus I have added automatic cache size adjustment code.
- *
- * The configuration for the automatic cache size adjustment is stored in
- * the structure described below:
- *
- * size_increase_possible: Depending on the configuration data given
- * in the resize_ctl field, it may or may not be possible
- * to increase the size of the cache. Rather than test for
- * all the ways this can happen, we simply set this flag when
- * we receive a new configuration.
- *
- * flash_size_increase_possible: Depending on the configuration data given
- * in the resize_ctl field, it may or may not be possible
- * for a flash size increase to occur. We set this flag
- * whenever we receive a new configuration so as to avoid
- * repeated calculations.
- *
- * flash_size_increase_threshold: If a flash cache size increase is possible,
- * this field is used to store the minimum size of a new entry
- * or size increase needed to trigger a flash cache size
- * increase. Note that this field must be updated whenever
- * the size of the cache is changed.
- *
- * size_decrease_possible: Depending on the configuration data given
- * in the resize_ctl field, it may or may not be possible
- * to decrease the size of the cache. Rather than test for
- * all the ways this can happen, we simply set this flag when
- * we receive a new configuration.
- *
- * cache_full: Boolean flag used to keep track of whether the cache is
- * full, so we can refrain from increasing the size of a
- * cache which hasn't used up the space allotted to it.
- *
- * The field is initialized to FALSE, and then set to TRUE
- * whenever we attempt to make space in the cache.
- *
- * resize_enabled: This is another convenience flag which is set whenever
- * a new set of values for resize_ctl are provided. Very
- * simply,
- *
- * resize_enabled = size_increase_possible ||
- * size_decrease_possible;
- *
- * size_decreased: Boolean flag set to TRUE whenever the maximum cache
- * size is decreased. The flag triggers a call to
- * H5C_make_space_in_cache() on the next call to H5C_protect().
- *
- * resize_ctl: Instance of H5C_auto_size_ctl_t containing configuration
- * data for automatic cache resizing.
- *
- * epoch_markers_active: Integer field containing the number of epoch
- * markers currently in use in the LRU list. This value
- * must be in the range [0, H5C__MAX_EPOCH_MARKERS - 1].
- *
- * epoch_marker_active: Array of boolean of length H5C__MAX_EPOCH_MARKERS.
- * This array is used to track which epoch markers are currently
- * in use.
- *
- * epoch_marker_ringbuf: Array of int of length H5C__MAX_EPOCH_MARKERS + 1.
- *
- * To manage the epoch marker cache entries, it is necessary
- * to track their order in the LRU list. This is done with
- * epoch_marker_ringbuf. When markers are inserted at the
- * head of the LRU list, the index of the marker in the
- * epoch_markers array is inserted at the tail of the ring
- * buffer. When it becomes the epoch_marker_active'th marker
- * in the LRU list, it will have worked its way to the head
- * of the ring buffer as well. This allows us to remove it
- * without scanning the LRU list if such is required.
- *
- * epoch_marker_ringbuf_first: Integer field containing the index of the
- * first entry in the ring buffer.
- *
- * epoch_marker_ringbuf_last: Integer field containing the index of the
- * last entry in the ring buffer.
- *
- * epoch_marker_ringbuf_size: Integer field containing the number of entries
- * in the ring buffer.
- *
- * epoch_markers: Array of instances of H5C_cache_entry_t of length
- * H5C__MAX_EPOCH_MARKERS. The entries are used as markers
- * in the LRU list to identify cache entries that haven't
- * been accessed for some (small) specified number of
- * epochs. These entries (if any) can then be evicted and
- * the cache size reduced -- ideally without evicting any
- * of the current working set. Needless to say, the epoch
- * length and the number of epochs before an unused entry
- * must be chosen so that all, or almost all, the working
- * set will be accessed before the limit.
- *
- * Epoch markers only appear in the LRU list, never in
- * the index or slist. While they are of type
- * H5C__EPOCH_MARKER_TYPE, and have associated class
- * functions, these functions should never be called.
- *
- * The addr fields of these instances of H5C_cache_entry_t
- * are set to the index of the instance in the epoch_markers
- * array, the size is set to 0, and the type field points
- * to the constant structure epoch_marker_class defined
- * in H5C.c. The next and prev fields are used as usual
- * to link the entry into the LRU list.
- *
- * All other fields are unused.
- *
- *
- * Cache hit rate collection fields:
- *
- * We supply the current cache hit rate on request, so we must keep a
- * simple cache hit rate computation regardless of whether statistics
- * collection is enabled. The following fields support this capability.
- *
- * cache_hits: Number of cache hits since the last time the cache hit
- * rate statistics were reset. Note that when automatic cache
- * re-sizing is enabled, this field will be reset every automatic
- * resize epoch.
- *
- * cache_accesses: Number of times the cache has been accessed while
- * since the last since the last time the cache hit rate statistics
- * were reset. Note that when automatic cache re-sizing is enabled,
- * this field will be reset every automatic resize epoch.
- *
- *
- * Statistics collection fields:
- *
- * When enabled, these fields are used to collect statistics as described
- * below. The first set are collected only when H5C_COLLECT_CACHE_STATS
- * is true.
- *
- * hits: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type id
- * equal to the array index has been in cache when requested in
- * the current epoch.
- *
- * misses: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type id
- * equal to the array index has not been in cache when
- * requested in the current epoch.
- *
- * write_protects: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
- * cells are used to record the number of times an entry with
- * type id equal to the array index has been write protected
- * in the current epoch.
- *
- * Observe that (hits + misses) = (write_protects + read_protects).
- *
- * read_protects: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
- * cells are used to record the number of times an entry with
- * type id equal to the array index has been read protected in
- * the current epoch.
- *
- * Observe that (hits + misses) = (write_protects + read_protects).
- *
- * max_read_protects: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1.
- * The cells are used to maximum number of simultaneous read
- * protects on any entry with type id equal to the array index
- * in the current epoch.
- *
- * insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type
- * id equal to the array index has been inserted into the
- * cache in the current epoch.
- *
- * pinned_insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
- * The cells are used to record the number of times an entry
- * with type id equal to the array index has been inserted
- * pinned into the cache in the current epoch.
- *
- * clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times a dirty entry with type
- * id equal to the array index has been cleared in the current
- * epoch.
- *
- * flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type id
- * equal to the array index has been written to disk in the
- * current epoch.
- *
- * evictions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type id
- * equal to the array index has been evicted from the cache in
- * the current epoch.
- *
- * moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type
- * id equal to the array index has been moved in the current
- * epoch.
- *
- * entry_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
- * The cells are used to record the number of times an entry
- * with type id equal to the array index has been moved
- * during its flush callback in the current epoch.
- *
- * cache_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
- * The cells are used to record the number of times an entry
- * with type id equal to the array index has been moved
- * during a cache flush in the current epoch.
- *
- * pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type
- * id equal to the array index has been pinned in the current
- * epoch.
- *
- * unpins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type
- * id equal to the array index has been unpinned in the current
- * epoch.
- *
- * dirty_pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type
- * id equal to the array index has been marked dirty while pinned
- * in the current epoch.
- *
- * pinned_flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
- * cells are used to record the number of times an entry
- * with type id equal to the array index has been flushed while
- * pinned in the current epoch.
- *
- * pinned_clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
- * cells are used to record the number of times an entry
- * with type id equal to the array index has been cleared while
- * pinned in the current epoch.
- *
- * size_increases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
- * The cells are used to record the number of times an entry
- * with type id equal to the array index has increased in
- * size in the current epoch.
- *
- * size_decreases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
- * The cells are used to record the number of times an entry
- * with type id equal to the array index has decreased in
- * size in the current epoch.
- *
- * entry_flush_size_changes: Array of int64 of length
- * H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record
- * the number of times an entry with type id equal to the
- * array index has changed size while in its flush callback.
- *
- * cache_flush_size_changes: Array of int64 of length
- * H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record
- * the number of times an entry with type id equal to the
- * array index has changed size during a cache flush
- *
- * total_ht_insertions: Number of times entries have been inserted into the
- * hash table in the current epoch.
- *
- * total_ht_deletions: Number of times entries have been deleted from the
- * hash table in the current epoch.
- *
- * successful_ht_searches: int64 containing the total number of successful
- * searches of the hash table in the current epoch.
- *
- * total_successful_ht_search_depth: int64 containing the total number of
- * entries other than the targets examined in successful
- * searches of the hash table in the current epoch.
- *
- * failed_ht_searches: int64 containing the total number of unsuccessful
- * searches of the hash table in the current epoch.
- *
- * total_failed_ht_search_depth: int64 containing the total number of
- * entries examined in unsuccessful searches of the hash
- * table in the current epoch.
- *
- * max_index_len: Largest value attained by the index_len field in the
- * current epoch.
- *
- * max_index_size: Largest value attained by the index_size field in the
- * current epoch.
- *
- * max_clean_index_size: Largest value attained by the clean_index_size field
- * in the current epoch.
- *
- * max_dirty_index_size: Largest value attained by the dirty_index_size field
- * in the current epoch.
- *
- * max_slist_len: Largest value attained by the slist_len field in the
- * current epoch.
- *
- * max_slist_size: Largest value attained by the slist_size field in the
- * current epoch.
- *
- * max_pl_len: Largest value attained by the pl_len field in the
- * current epoch.
- *
- * max_pl_size: Largest value attained by the pl_size field in the
- * current epoch.
- *
- * max_pel_len: Largest value attained by the pel_len field in the
- * current epoch.
- *
- * max_pel_size: Largest value attained by the pel_size field in the
- * current epoch.
- *
- * calls_to_msic: Total number of calls to H5C_make_space_in_cache
- *
- * total_entries_skipped_in_msic: Number of clean entries skipped while
- * enforcing the min_clean_fraction in H5C_make_space_in_cache().
- *
- * total_entries_scanned_in_msic: Number of clean entries skipped while
- * enforcing the min_clean_fraction in H5C_make_space_in_cache().
- *
- * max_entries_skipped_in_msic: Maximum number of clean entries skipped
- * in any one call to H5C_make_space_in_cache().
- *
- * max_entries_scanned_in_msic: Maximum number of entries scanned over
- * in any one call to H5C_make_space_in_cache().
- *
- * entries_scanned_to_make_space: Number of entries scanned only when looking
- * for entries to evict in order to make space in cache.
-
- * The remaining stats are collected only when both H5C_COLLECT_CACHE_STATS
- * and H5C_COLLECT_CACHE_ENTRY_STATS are true.
- *
- * max_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the maximum number of times any single
- * entry with type id equal to the array index has been
- * accessed in the current epoch.
- *
- * min_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the minimum number of times any single
- * entry with type id equal to the array index has been
- * accessed in the current epoch.
- *
- * max_clears: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the maximum number of times any single
- * entry with type id equal to the array index has been cleared
- * in the current epoch.
- *
- * max_flushes: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the maximum number of times any single
- * entry with type id equal to the array index has been
- * flushed in the current epoch.
- *
- * max_size: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the maximum size of any single entry
- * with type id equal to the array index that has resided in
- * the cache in the current epoch.
- *
- * max_pins: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the maximum number of times that any single
- * entry with type id equal to the array index that has been
- * marked as pinned in the cache in the current epoch.
- *
- *
- * Fields supporting testing:
- *
- * prefix Array of char used to prefix debugging output. The
- * field is intended to allow marking of output of with
- * the processes mpi rank.
- *
- * get_entry_ptr_from_addr_counter: Counter used to track the number of
- * times the H5C_get_entry_ptr_from_addr() function has been
- * called successfully. This field is only defined when
- * NDEBUG is not #defined.
- *
- ****************************************************************************/
-
+/* Cache configuration settings */
#define H5C__HASH_TABLE_LEN (64 * 1024) /* must be a power of 2 */
-
#define H5C__H5C_T_MAGIC 0x005CAC0E
#define H5C__MAX_NUM_TYPE_IDS 27
#define H5C__PREFIX_LEN 32
-struct H5C_t
-{
- uint32_t magic;
-
- hbool_t flush_in_progress;
-
- FILE * trace_file_ptr;
-
- void * aux_ptr;
-
- int32_t max_type_id;
- const char * (* type_name_table_ptr);
-
- size_t max_cache_size;
- size_t min_clean_size;
-
- H5C_write_permitted_func_t check_write_permitted;
- hbool_t write_permitted;
-
- H5C_log_flush_func_t log_flush;
-
- hbool_t evictions_enabled;
-
- int32_t index_len;
- size_t index_size;
- size_t clean_index_size;
- size_t dirty_index_size;
- H5C_cache_entry_t * (index[H5C__HASH_TABLE_LEN]);
-
- hbool_t ignore_tags;
-
- int32_t slist_len;
- size_t slist_size;
- H5SL_t * slist_ptr;
- int32_t num_last_entries;
-#if H5C_DO_SANITY_CHECKS
- int64_t slist_len_increase;
- int64_t slist_size_increase;
-#endif /* H5C_DO_SANITY_CHECKS */
-
- int32_t pl_len;
- size_t pl_size;
- H5C_cache_entry_t * pl_head_ptr;
- H5C_cache_entry_t * pl_tail_ptr;
-
- int32_t pel_len;
- size_t pel_size;
- H5C_cache_entry_t * pel_head_ptr;
- H5C_cache_entry_t * pel_tail_ptr;
-
- int32_t LRU_list_len;
- size_t LRU_list_size;
- H5C_cache_entry_t * LRU_head_ptr;
- H5C_cache_entry_t * LRU_tail_ptr;
-
- int32_t cLRU_list_len;
- size_t cLRU_list_size;
- H5C_cache_entry_t * cLRU_head_ptr;
- H5C_cache_entry_t * cLRU_tail_ptr;
-
- int32_t dLRU_list_len;
- size_t dLRU_list_size;
- H5C_cache_entry_t * dLRU_head_ptr;
- H5C_cache_entry_t * dLRU_tail_ptr;
-
- hbool_t size_increase_possible;
- hbool_t flash_size_increase_possible;
- size_t flash_size_increase_threshold;
- hbool_t size_decrease_possible;
- hbool_t resize_enabled;
- hbool_t cache_full;
- hbool_t size_decreased;
- H5C_auto_size_ctl_t resize_ctl;
-
- int32_t epoch_markers_active;
- hbool_t epoch_marker_active[H5C__MAX_EPOCH_MARKERS];
- int32_t epoch_marker_ringbuf[H5C__MAX_EPOCH_MARKERS+1];
- int32_t epoch_marker_ringbuf_first;
- int32_t epoch_marker_ringbuf_last;
- int32_t epoch_marker_ringbuf_size;
- H5C_cache_entry_t epoch_markers[H5C__MAX_EPOCH_MARKERS];
-
- int64_t cache_hits;
- int64_t cache_accesses;
-
-#if H5C_COLLECT_CACHE_STATS
-
- /* stats fields */
- int64_t hits[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t misses[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t write_protects[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t read_protects[H5C__MAX_NUM_TYPE_IDS + 1];
- int32_t max_read_protects[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t insertions[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t pinned_insertions[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t clears[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t flushes[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t evictions[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t moves[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t entry_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t cache_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t pins[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t unpins[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t dirty_pins[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t pinned_flushes[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t pinned_clears[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t size_increases[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t size_decreases[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t entry_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t cache_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1];
-
- int64_t total_ht_insertions;
- int64_t total_ht_deletions;
- int64_t successful_ht_searches;
- int64_t total_successful_ht_search_depth;
- int64_t failed_ht_searches;
- int64_t total_failed_ht_search_depth;
-
- int32_t max_index_len;
- size_t max_index_size;
- size_t max_clean_index_size;
- size_t max_dirty_index_size;
-
- int32_t max_slist_len;
- size_t max_slist_size;
-
- int32_t max_pl_len;
- size_t max_pl_size;
-
- int32_t max_pel_len;
- size_t max_pel_size;
-
- int64_t calls_to_msic;
- int64_t total_entries_skipped_in_msic;
- int64_t total_entries_scanned_in_msic;
- int32_t max_entries_skipped_in_msic;
- int32_t max_entries_scanned_in_msic;
- int64_t entries_scanned_to_make_space;
-
-#if H5C_COLLECT_CACHE_ENTRY_STATS
-
- int32_t max_accesses[H5C__MAX_NUM_TYPE_IDS + 1];
- int32_t min_accesses[H5C__MAX_NUM_TYPE_IDS + 1];
- int32_t max_clears[H5C__MAX_NUM_TYPE_IDS + 1];
- int32_t max_flushes[H5C__MAX_NUM_TYPE_IDS + 1];
- size_t max_size[H5C__MAX_NUM_TYPE_IDS + 1];
- int32_t max_pins[H5C__MAX_NUM_TYPE_IDS + 1];
-
-#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
-
-#endif /* H5C_COLLECT_CACHE_STATS */
-
- char prefix[H5C__PREFIX_LEN];
-
-#ifndef NDEBUG
-
- int64_t get_entry_ptr_from_addr_counter;
-
-#endif /* NDEBUG */
-};
-
-
-/****************************************************************************/
-/***************************** Macro Definitions ****************************/
-/****************************************************************************/
-
/****************************************************************************
*
* We maintain doubly linked lists of instances of H5C_cache_entry_t for a
@@ -1055,11 +91,11 @@ struct H5C_t
*
* from the H5C__DLL_PRE_REMOVE_SC macro. With the addition of the
* epoch markers used in the age out based cache size reduction algorithm,
- * this invarient need not hold, as the epoch markers are of size 0.
+ * this invariant need not hold, as the epoch markers are of size 0.
*
* One could argue that I should have given the epoch markers a positive
* size, but this would break the index_size = LRU_list_size + pl_size
- * + pel_size invarient.
+ * + pel_size invariant.
*
* Alternatively, I could pass the current decr_mode in to the macro,
* and just skip the check whenever epoch markers may be in use.
@@ -1615,7 +651,6 @@ if ( ( (entry_ptr) == NULL ) || \
((cache_ptr)->max_size)[(entry_ptr)->type->id] ) \
((cache_ptr)->max_size)[(entry_ptr)->type->id] \
= (entry_ptr)->size; \
- } \
}
#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \
@@ -1781,21 +816,21 @@ if ( ( (entry_ptr) == NULL ) || \
#if H5C_DO_SANITY_CHECKS
-#define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
-if ( ( (cache_ptr) == NULL ) || \
- ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
- ( (entry_ptr) == NULL ) || \
- ( ! H5F_addr_defined((entry_ptr)->addr) ) || \
- ( (entry_ptr)->ht_next != NULL ) || \
- ( (entry_ptr)->ht_prev != NULL ) || \
- ( (entry_ptr)->size <= 0 ) || \
- ( (k = H5C__HASH_FCN((entry_ptr)->addr)) < 0 ) || \
- ( k >= H5C__HASH_TABLE_LEN ) || \
- ( (cache_ptr)->index_size != \
- ((cache_ptr)->clean_index_size + \
- (cache_ptr)->dirty_index_size) ) ) { \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
- "Pre HT insert SC failed") \
+#define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
+if ( ( (cache_ptr) == NULL ) || \
+ ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
+ ( (entry_ptr) == NULL ) || \
+ ( ! H5F_addr_defined((entry_ptr)->addr) ) || \
+ ( (entry_ptr)->ht_next != NULL ) || \
+ ( (entry_ptr)->ht_prev != NULL ) || \
+ ( (entry_ptr)->size <= 0 ) || \
+ ( H5C__HASH_FCN((entry_ptr)->addr) < 0 ) || \
+ ( H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN ) || \
+ ( (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + \
+ (cache_ptr)->dirty_index_size) ) ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
+ "Pre HT insert SC failed") \
}
#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \
@@ -1880,7 +915,6 @@ if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + \
(cache_ptr)->dirty_index_size) ) || \
- ( (entry_ptr == NULL) ) || \
( ( !( was_clean ) || \
( (cache_ptr)->clean_index_size < (old_size) ) ) && \
( ( (was_clean) ) || \
@@ -1978,22 +1012,18 @@ if ( (cache_ptr)->index_size != \
H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
k = H5C__HASH_FCN((entry_ptr)->addr); \
if ( ((cache_ptr)->index)[k] == NULL ) \
- { \
((cache_ptr)->index)[k] = (entry_ptr); \
- } \
- else \
- { \
+ else { \
(entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
(entry_ptr)->ht_next->ht_prev = (entry_ptr); \
((cache_ptr)->index)[k] = (entry_ptr); \
} \
(cache_ptr)->index_len++; \
(cache_ptr)->index_size += (entry_ptr)->size; \
- if ( (entry_ptr)->is_dirty ) { \
+ if ( (entry_ptr)->is_dirty ) \
(cache_ptr)->dirty_index_size += (entry_ptr)->size; \
- } else { \
+ else \
(cache_ptr)->clean_index_size += (entry_ptr)->size; \
- } \
if ((entry_ptr)->flush_me_last) { \
(cache_ptr)->num_last_entries++; \
HDassert((cache_ptr)->num_last_entries == 1); \
@@ -2007,26 +1037,19 @@ if ( (cache_ptr)->index_size != \
H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \
k = H5C__HASH_FCN((entry_ptr)->addr); \
if ( (entry_ptr)->ht_next ) \
- { \
(entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
- } \
if ( (entry_ptr)->ht_prev ) \
- { \
(entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
- } \
if ( ((cache_ptr)->index)[k] == (entry_ptr) ) \
- { \
((cache_ptr)->index)[k] = (entry_ptr)->ht_next; \
- } \
(entry_ptr)->ht_next = NULL; \
(entry_ptr)->ht_prev = NULL; \
(cache_ptr)->index_len--; \
(cache_ptr)->index_size -= (entry_ptr)->size; \
- if ( (entry_ptr)->is_dirty ) { \
+ if ( (entry_ptr)->is_dirty ) \
(cache_ptr)->dirty_index_size -= (entry_ptr)->size; \
- } else { \
+ else \
(cache_ptr)->clean_index_size -= (entry_ptr)->size; \
- } \
if ((entry_ptr)->flush_me_last) { \
(cache_ptr)->num_last_entries--; \
HDassert((cache_ptr)->num_last_entries == 0); \
@@ -2041,20 +1064,15 @@ if ( (cache_ptr)->index_size != \
H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
k = H5C__HASH_FCN(Addr); \
entry_ptr = ((cache_ptr)->index)[k]; \
- while ( ( entry_ptr ) && ( H5F_addr_ne(Addr, (entry_ptr)->addr) ) ) \
- { \
+ while ( ( entry_ptr ) && ( H5F_addr_ne(Addr, (entry_ptr)->addr) ) ) { \
(entry_ptr) = (entry_ptr)->ht_next; \
(depth)++; \
} \
- if ( entry_ptr ) \
- { \
+ if ( entry_ptr ) { \
H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val) \
- if ( entry_ptr != ((cache_ptr)->index)[k] ) \
- { \
+ if ( entry_ptr != ((cache_ptr)->index)[k] ) { \
if ( (entry_ptr)->ht_next ) \
- { \
(entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
- } \
HDassert( (entry_ptr)->ht_prev != NULL ); \
(entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \
@@ -2074,20 +1092,15 @@ if ( (cache_ptr)->index_size != \
H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
k = H5C__HASH_FCN(Addr); \
entry_ptr = ((cache_ptr)->index)[k]; \
- while ( ( entry_ptr ) && ( H5F_addr_ne(Addr, (entry_ptr)->addr) ) ) \
- { \
+ while ( ( entry_ptr ) && ( H5F_addr_ne(Addr, (entry_ptr)->addr) ) ) { \
(entry_ptr) = (entry_ptr)->ht_next; \
(depth)++; \
} \
- if ( entry_ptr ) \
- { \
+ if ( entry_ptr ) { \
H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val) \
- if ( entry_ptr != ((cache_ptr)->index)[k] ) \
- { \
+ if ( entry_ptr != ((cache_ptr)->index)[k] ) { \
if ( (entry_ptr)->ht_next ) \
- { \
(entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
- } \
HDassert( (entry_ptr)->ht_prev != NULL ); \
(entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \
@@ -2122,16 +1135,14 @@ if ( (cache_ptr)->index_size != \
entry_ptr, was_clean) \
(cache_ptr)->index_size -= (old_size); \
(cache_ptr)->index_size += (new_size); \
- if ( was_clean ) { \
+ if ( was_clean ) \
(cache_ptr)->clean_index_size -= (old_size); \
- } else { \
+ else \
(cache_ptr)->dirty_index_size -= (old_size); \
- } \
- if ( (entry_ptr)->is_dirty ) { \
+ if ( (entry_ptr)->is_dirty ) \
(cache_ptr)->dirty_index_size += (new_size); \
- } else { \
+ else \
(cache_ptr)->clean_index_size += (new_size); \
- } \
H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
entry_ptr) \
}
@@ -2208,8 +1219,7 @@ if ( (cache_ptr)->index_size != \
HDassert( H5F_addr_defined((entry_ptr)->addr) ); \
HDassert( !((entry_ptr)->in_slist) ); \
\
- if ( H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) \
- < 0 ) \
+ if(H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) < 0) \
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
"Can't insert entry in skip list") \
\
@@ -2235,8 +1245,7 @@ if ( (cache_ptr)->index_size != \
HDassert( H5F_addr_defined((entry_ptr)->addr) ); \
HDassert( !((entry_ptr)->in_slist) ); \
\
- if ( H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) \
- < 0 ) \
+ if(H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) < 0) \
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
"Can't insert entry in skip list") \
\
@@ -2297,8 +1306,7 @@ if ( (cache_ptr)->index_size != \
HDassert( (cache_ptr)->slist_ptr ); \
\
if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \
- != (entry_ptr) ) \
- \
+ != (entry_ptr) ) \
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \
"Can't delete entry from skip list.") \
\
@@ -3601,5 +2609,983 @@ if ( (cache_ptr)->index_size != \
#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+/****************************/
+/* Package Private Typedefs */
+/****************************/
+
+/****************************************************************************
+ *
+ * structure H5C_t
+ *
+ * Catchall structure for all variables specific to an instance of the cache.
+ *
+ * While the individual fields of the structure are discussed below, the
+ * following overview may be helpful.
+ *
+ * Entries in the cache are stored in an instance of H5TB_TREE, indexed on
+ * the entry's disk address. While the H5TB_TREE is less efficient than
+ * hash table, it keeps the entries in address sorted order. As flushes
+ * in parallel mode are more efficient if they are issued in increasing
+ * address order, this is a significant benefit. Also the H5TB_TREE code
+ * was readily available, which reduced development time.
+ *
+ * While the cache was designed with multiple replacement policies in mind,
+ * at present only a modified form of LRU is supported.
+ *
+ * JRM - 4/26/04
+ *
+ * Profiling has indicated that searches in the instance of H5TB_TREE are
+ * too expensive. To deal with this issue, I have augmented the cache
+ * with a hash table in which all entries will be stored. Given the
+ * advantages of flushing entries in increasing address order, the TBBT
+ * is retained, but only dirty entries are stored in it. At least for
+ * now, we will leave entries in the TBBT after they are flushed.
+ *
+ * Note that index_size and index_len now refer to the total size of
+ * and number of entries in the hash table.
+ *
+ * JRM - 7/19/04
+ *
+ * The TBBT has since been replaced with a skip list. This change
+ * greatly predates this note.
+ *
+ * JRM - 9/26/05
+ *
+ * magic: Unsigned 32 bit integer always set to H5C__H5C_T_MAGIC.
+ * This field is used to validate pointers to instances of
+ * H5C_t.
+ *
+ * flush_in_progress: Boolean flag indicating whether a flush is in
+ * progress.
+ *
+ * trace_file_ptr: File pointer pointing to the trace file, which is used
+ * to record cache operations for use in simulations and design
+ * studies. This field will usually be NULL, indicating that
+ * no trace file should be recorded.
+ *
+ * Since much of the code supporting the parallel metadata
+ * cache is in H5AC, we don't write the trace file from
+ * H5C. Instead, H5AC reads the trace_file_ptr as needed.
+ *
+ * When we get to using H5C in other places, we may add
+ * code to write trace file data at the H5C level as well.
+ *
+ * aux_ptr: Pointer to void used to allow wrapper code to associate
+ * its data with an instance of H5C_t. The H5C cache code
+ * sets this field to NULL, and otherwise leaves it alone.
+ *
+ * max_type_id: Integer field containing the maximum type id number assigned
+ * to a type of entry in the cache. All type ids from 0 to
+ * max_type_id inclusive must be defined. The names of the
+ * types are stored in the type_name_table discussed below, and
+ * indexed by the ids.
+ *
+ * type_name_table_ptr: Pointer to an array of pointer to char of length
+ * max_type_id + 1. The strings pointed to by the entries
+ * in the array are the names of the entry types associated
+ * with the indexing type IDs.
+ *
+ * max_cache_size: Nominal maximum number of bytes that may be stored in the
+ * cache. This value should be viewed as a soft limit, as the
+ * cache can exceed this value under the following circumstances:
+ *
+ * a) All entries in the cache are protected, and the cache is
+ * asked to insert a new entry. In this case the new entry
+ * will be created. If this causes the cache to exceed
+ * max_cache_size, it will do so. The cache will attempt
+ * to reduce its size as entries are unprotected.
+ *
+ * b) When running in parallel mode, the cache may not be
+ * permitted to flush a dirty entry in response to a read.
+ * If there are no clean entries available to evict, the
+ * cache will exceed its maximum size. Again the cache
+ * will attempt to reduce its size to the max_cache_size
+ * limit on the next cache write.
+ *
+ * c) When an entry increases in size, the cache may exceed
+ * the max_cache_size limit until the next time the cache
+ * attempts to load or insert an entry.
+ *
+ * min_clean_size: Nominal minimum number of clean bytes in the cache.
+ * The cache attempts to maintain this number of bytes of
+ * clean data so as to avoid case b) above. Again, this is
+ * a soft limit.
+ *
+ *
+ * In addition to the call back functions required for each entry, the
+ * cache requires the following call back functions for this instance of
+ * the cache as a whole:
+ *
+ * check_write_permitted: In certain applications, the cache may not
+ * be allowed to write to disk at certain times. If specified,
+ * the check_write_permitted function is used to determine if
+ * a write is permissible at any given point in time.
+ *
+ * If no such function is specified (i.e. this field is NULL),
+ * the cache uses the following write_permitted field to
+ * determine whether writes are permitted.
+ *
+ * write_permitted: If check_write_permitted is NULL, this boolean flag
+ * indicates whether writes are permitted.
+ *
+ * log_flush: If provided, this function is called whenever a dirty
+ * entry is flushed to disk.
+ *
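
A minimal sketch of how these callbacks are consulted, mirroring the
H5C_protect() hunks earlier in this diff (the wrapper function name is
hypothetical):

    static herr_t
    check_if_write_permitted(const H5C_t * cache_ptr, H5F_t * f,
                             hbool_t * write_permitted_ptr)
    {
        if ( cache_ptr->check_write_permitted != NULL ) {
            if ( (cache_ptr->check_write_permitted)(f, write_permitted_ptr) < 0 )
                return(FAIL);
        } /* end if */
        else
            *write_permitted_ptr = cache_ptr->write_permitted;

        return(SUCCEED);
    }
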
+ *
+ * In cases where memory is plentiful, and performance is an issue, it may
+ * be useful to disable all cache evictions, and thereby postpone metadata
+ * writes. The following field is used to implement this.
+ *
+ * evictions_enabled: Boolean flag that is initialized to TRUE. When
+ * this flag is set to FALSE, the metadata cache will not
+ * attempt to evict entries to make space for newly protected
+ * entries, and instead the cache will grow without limit.
+ *
+ * Needless to say, this feature must be used with care.
+ *
+ *
+ * The cache requires an index to facilitate searching for entries. The
+ * following fields support that index.
+ *
+ * index_len: Number of entries currently in the hash table used to index
+ * the cache.
+ *
+ * index_size: Number of bytes of cache entries currently stored in the
+ * hash table used to index the cache.
+ *
+ * This value should not be mistaken for the footprint of the
+ * cache in memory. The average cache entry is small, and
+ * the cache has a considerable overhead. Multiplying the
+ * index_size by two should yield a conservative estimate
+ * of the cache's memory footprint.
+ *
+ * clean_index_size: Number of bytes of clean entries currently stored in
+ * the hash table. Note that the index_size field (above)
+ * is also the sum of the sizes of all entries in the cache.
+ * Thus we should have the invariant that clean_index_size +
+ * dirty_index_size == index_size.
+ *
+ * WARNING:
+ *
+ * 1) The clean_index_size field is not maintained by the
+ * index macros, as the hash table doesn't care whether
+ * the entry is clean or dirty. Instead the field is
+ * maintained in the H5C__UPDATE_RP macros.
+ *
+ * 2) The value of the clean_index_size must not be mistaken
+ * for the current clean size of the cache. Rather, the
+ * clean size of the cache is the current value of
+ * clean_index_size plus the amount of empty space (if any)
+ * in the cache.
+ *
+ * dirty_index_size: Number of bytes of dirty entries currently stored in
+ * the hash table. Note that the index_size field (above)
+ * is also the sum of the sizes of all entries in the cache.
+ * Thus we should have the invariant that clean_index_size +
+ * dirty_index_size == index_size.
+ *
+ * WARNING:
+ *
+ * 1) The dirty_index_size field is not maintained by the
+ * index macros, as the hash table doesn't care whether
+ * the entry is clean or dirty. Instead the field is
+ * maintained in the H5C__UPDATE_RP macros.
+ *
+ * index: Array of pointer to H5C_cache_entry_t of size
+ * H5C__HASH_TABLE_LEN. At present, this value is a power
+ * of two, not the usual prime number.
+ *
+ * I hope that the variable size of cache elements, the large
+ * hash table size, and the way in which HDF5 allocates space
+ * will combine to avoid problems with periodicity. If so, we
+ * can use a trivial hash function (a bit-and and a 3 bit left
+ * shift) with some small savings.
+ *
+ * If not, it will become evident in the statistics. Changing
+ * to the usual prime number length hash table will require
+ * changing the H5C__HASH_FCN macro and the deletion of the
+ * H5C__HASH_MASK #define. No other changes should be required.
+ *
+ *
+ * With the addition of cache entry tagging, it is possible that
+ * an entry may be inserted into the cache without a tag during testing
+ * and the tag's validity shouldn't be checked.
+ *
+ * The following field is maintained to facilitate this.
+ *
+ * ignore_tags: Boolean flag to disable tag validation during entry insertion.
+ *
+ * When we flush the cache, we need to write entries out in increasing
+ * address order. An instance of a skip list is used to store dirty entries in
+ * sorted order. Whether it is cheaper to sort the dirty entries as needed,
+ * or to maintain the list is an open question. At a guess, it depends
+ * on how frequently the cache is flushed. We will see how it goes.
+ *
+ * For now at least, I will not remove dirty entries from the list as they
+ * are flushed. (this has been changed -- dirty entries are now removed from
+ * the skip list as they are flushed. JRM - 10/25/05)
+ *
+ * slist_len: Number of entries currently in the skip list
+ * used to maintain a sorted list of dirty entries in the
+ * cache.
+ *
+ * slist_size: Number of bytes of cache entries currently stored in the
+ * skip list used to maintain a sorted list of
+ * dirty entries in the cache.
+ *
+ * slist_ptr: Pointer to the instance of H5SL_t used to maintain a sorted
+ * list of dirty entries in the cache. This sorted list has
+ * two uses:
+ *
+ * a) It allows us to flush dirty entries in increasing address
+ * order, which results in significant savings.
+ *
+ * b) It facilitates checking for adjacent dirty entries when
+ * attempting to evict entries from the cache. While we
+ * don't use this at present, I hope that this will allow
+ * some optimizations when I get to it.
+ *
+ * num_last_entries: The number of entries in the cache that can only be
+ * flushed after all other entries in the cache have
+ * been flushed. At this time, this will only ever be
+ * one entry (the superblock), and the code has been
+ * protected with HDasserts to enforce this. This restraint
+ * can certainly be relaxed in the future if the need for
+ * multiple entries being flushed last arises, though
+ * explicit tests for that case should be added when said
+ * HDasserts are removed.
+ *
+ * With the addition of the fractal heap, the cache must now deal with
+ * the case in which entries may be dirtied, moved, or have their sizes
+ * changed during a flush. To allow sanity checks in this situation, the
+ * following two fields have been added. They are only compiled in when
+ * H5C_DO_SANITY_CHECKS is TRUE.
+ *
+ * slist_len_increase: Number of entries that have been added to the
+ * slist since the last time this field was set to zero.
+ *
+ * slist_size_increase: Total size of all entries that have been added
+ * to the slist since the last time this field was set to
+ * zero.
+ *
+ *
+ * When a cache entry is protected, it must be removed from the LRU
+ * list(s) as it cannot be either flushed or evicted until it is unprotected.
+ * The following fields are used to implement the protected list (pl).
+ *
+ * pl_len: Number of entries currently residing on the protected list.
+ *
+ * pl_size: Number of bytes of cache entries currently residing on the
+ * protected list.
+ *
+ * pl_head_ptr: Pointer to the head of the doubly linked list of protected
+ * entries. Note that cache entries on this list are linked
+ * by their next and prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * pl_tail_ptr: Pointer to the tail of the doubly linked list of protected
+ * entries. Note that cache entries on this list are linked
+ * by their next and prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ *
+ * For very frequently used entries, the protect/unprotect overhead can
+ * become burdensome. To avoid this overhead, I have modified the cache
+ * to allow entries to be "pinned". A pinned entry is similar to a
+ * protected entry, in the sense that it cannot be evicted, and that
+ * the entry can be modified at any time.
+ *
+ * Pinning an entry has the following implications:
+ *
+ * 1) A pinned entry cannot be evicted. Thus unprotected
+ * pinned entries reside in the pinned entry list, instead
+ * of the LRU list(s) (or other lists maintained by the current
+ * replacement policy code).
+ *
+ * 2) A pinned entry can be accessed or modified at any time.
+ * Therefore, the cache must check with the entry owner
+ * before flushing it. If permission is denied, the
+ * cache just skips the entry in the flush.
+ *
+ * 3) A pinned entry can be marked as dirty (and possibly
+ * change size) while it is unprotected.
+ *
+ * 4) The flush-destroy code must allow pinned entries to
+ * be unpinned (and possibly unprotected) during the
+ * flush.
+ *
+ * Since pinned entries cannot be evicted, they must be kept on a pinned
+ * entry list (pel), instead of being entrusted to the replacement policy
+ * code.
+ *
+ * Maintaining the pinned entry list requires the following fields:
+ *
+ * pel_len: Number of entries currently residing on the pinned
+ * entry list.
+ *
+ * pel_size: Number of bytes of cache entries currently residing on
+ * the pinned entry list.
+ *
+ * pel_head_ptr: Pointer to the head of the doubly linked list of pinned
+ * but not protected entries. Note that cache entries on
+ * this list are linked by their next and prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * pel_tail_ptr: Pointer to the tail of the doubly linked list of pinned
+ * but not protected entries. Note that cache entries on
+ * this list are linked by their next and prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ *
+ * The cache must have a replacement policy, and the fields supporting this
+ * policy must be accessible from this structure.
+ *
+ * While there has been interest in several replacement policies for
+ * this cache, the initial development schedule is tight. Thus I have
+ * elected to support only a modified LRU (least recently used) policy
+ * for the first cut.
+ *
+ * To further simplify matters, I have simply included the fields needed
+ * by the modified LRU in this structure. When and if we add support for
+ * other policies, it will probably be easiest to just add the necessary
+ * fields to this structure as well -- we only create one instance of this
+ * structure per file, so the overhead is not excessive.
+ *
+ *
+ * Fields supporting the modified LRU policy:
+ *
+ * See most any OS text for a discussion of the LRU replacement policy.
+ *
+ * When operating in parallel mode, we must ensure that a read does not
+ * cause a write. If it does, the process will hang, as the write will
+ * be collective and the other processes will not know to participate.
+ *
+ * To deal with this issue, I have modified the usual LRU policy by adding
+ * clean and dirty LRU lists to the usual LRU list.
+ *
+ * The clean LRU list is simply the regular LRU list with all dirty cache
+ * entries removed.
+ *
+ * Similarly, the dirty LRU list is the regular LRU list with all the clean
+ * cache entries removed.
+ *
+ * When reading in parallel mode, we evict from the clean LRU list only.
+ * This implies that we must try to ensure that the clean LRU list is
+ * reasonably well stocked at all times.
+ *
+ * We attempt to do this by trying to flush enough entries on each write
+ * to keep the cLRU_list_size >= min_clean_size.
+ *
+ * Even if we start with a completely clean cache, a sequence of protects
+ * without unprotects can empty the clean LRU list. In this case, the
+ * cache must grow temporarily. At the next write, we will attempt to
+ * evict enough entries to reduce index_size to less than max_cache_size.
+ * While this will usually be possible, all bets are off if enough entries
+ * are protected.
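+ *
+ * For illustration only (not part of this change), eviction in this
+ * mode amounts to a scan of the clean LRU from the tail, roughly:
+ *
+ *     entry_ptr = cache_ptr->cLRU_tail_ptr;
+ *     while((space_needed > 0) && (entry_ptr != NULL)) {
+ *         prev_ptr = entry_ptr->aux_prev;
+ *         space_needed -= entry_ptr->size;
+ *         ... evict entry_ptr (no write needed -- it is clean) ...
+ *         entry_ptr = prev_ptr;
+ *     }
+ *
+ * Entries on the clean LRU are linked via their aux_next / aux_prev
+ * fields, so no dirty entry is ever touched and the read never
+ * triggers a write.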
+ *
+ * Discussions of the individual fields used by the modified LRU replacement
+ * policy follow:
+ *
+ * LRU_list_len: Number of cache entries currently on the LRU list.
+ *
+ * Observe that LRU_list_len + pl_len must always equal
+ * index_len.
+ *
+ * LRU_list_size: Number of bytes of cache entries currently residing on the
+ * LRU list.
+ *
+ * Observe that LRU_list_size + pl_size must always equal
+ * index_size.
+ *
+ * LRU_head_ptr: Pointer to the head of the doubly linked LRU list. Cache
+ * entries on this list are linked by their next and prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * LRU_tail_ptr: Pointer to the tail of the doubly linked LRU list. Cache
+ * entries on this list are linked by their next and prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * cLRU_list_len: Number of cache entries currently on the clean LRU list.
+ *
+ * Observe that cLRU_list_len + dLRU_list_len must always
+ * equal LRU_list_len.
+ *
+ * cLRU_list_size: Number of bytes of cache entries currently residing on
+ * the clean LRU list.
+ *
+ * Observe that cLRU_list_size + dLRU_list_size must always
+ * equal LRU_list_size.
+ *
+ * cLRU_head_ptr: Pointer to the head of the doubly linked clean LRU list.
+ * Cache entries on this list are linked by their aux_next and
+ * aux_prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * cLRU_tail_ptr: Pointer to the tail of the doubly linked clean LRU list.
+ * Cache entries on this list are linked by their aux_next and
+ * aux_prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * dLRU_list_len: Number of cache entries currently on the dirty LRU list.
+ *
+ * Observe that cLRU_list_len + dLRU_list_len must always
+ * equal LRU_list_len.
+ *
+ * dLRU_list_size: Number of bytes of cache entries currently residing on
+ * the dirty LRU list.
+ *
+ * Observe that cLRU_list_size + dLRU_list_size must always
+ * equal LRU_list_size.
+ *
+ * dLRU_head_ptr: Pointer to the head of the doubly linked dirty LRU list.
+ * Cache entries on this list are linked by their aux_next and
+ * aux_prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * dLRU_tail_ptr: Pointer to the tail of the doubly linked dirty LRU list.
+ * Cache entries on this list are linked by their aux_next and
+ * aux_prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ *
+ * Automatic cache size adjustment:
+ *
+ * While the default cache size is adequate for most cases, we can run into
+ * cases where the default is too small. Ideally, we will let the user
+ * adjust the cache size as required. However, this is not possible in all
+ * cases. Thus I have added automatic cache size adjustment code.
+ *
+ * The configuration for the automatic cache size adjustment is stored in
+ * the structure described below:
+ *
+ * size_increase_possible: Depending on the configuration data given
+ * in the resize_ctl field, it may or may not be possible
+ * to increase the size of the cache. Rather than test for
+ * all the ways this can happen, we simply set this flag when
+ * we receive a new configuration.
+ *
+ * flash_size_increase_possible: Depending on the configuration data given
+ * in the resize_ctl field, it may or may not be possible
+ * for a flash size increase to occur. We set this flag
+ * whenever we receive a new configuration so as to avoid
+ * repeated calculations.
+ *
+ * flash_size_increase_threshold: If a flash cache size increase is possible,
+ * this field is used to store the minimum size of a new entry
+ * or size increase needed to trigger a flash cache size
+ * increase. Note that this field must be updated whenever
+ * the size of the cache is changed.
+ *
+ * size_decrease_possible: Depending on the configuration data given
+ * in the resize_ctl field, it may or may not be possible
+ * to decrease the size of the cache. Rather than test for
+ * all the ways this can happen, we simply set this flag when
+ * we receive a new configuration.
+ *
+ * cache_full: Boolean flag used to keep track of whether the cache is
+ * full, so we can refrain from increasing the size of a
+ * cache which hasn't used up the space allotted to it.
+ *
+ * The field is initialized to FALSE, and then set to TRUE
+ * whenever we attempt to make space in the cache.
+ *
+ * resize_enabled: This is another convenience flag which is set whenever
+ * a new set of values for resize_ctl are provided. Very
+ * simply,
+ *
+ * resize_enabled = size_increase_possible ||
+ * size_decrease_possible;
+ *
+ * size_decreased: Boolean flag set to TRUE whenever the maximum cache
+ * size is decreased. The flag triggers a call to
+ * H5C_make_space_in_cache() on the next call to H5C_protect().
+ *
+ * resize_ctl: Instance of H5C_auto_size_ctl_t containing configuration
+ * data for automatic cache resizing.
+ *
+ * epoch_markers_active: Integer field containing the number of epoch
+ * markers currently in use in the LRU list. This value
+ * must be in the range [0, H5C__MAX_EPOCH_MARKERS - 1].
+ *
+ * epoch_marker_active: Array of boolean of length H5C__MAX_EPOCH_MARKERS.
+ * This array is used to track which epoch markers are currently
+ * in use.
+ *
+ * epoch_marker_ringbuf: Array of int of length H5C__MAX_EPOCH_MARKERS + 1.
+ *
+ * To manage the epoch marker cache entries, it is necessary
+ * to track their order in the LRU list. This is done with
+ * epoch_marker_ringbuf. When markers are inserted at the
+ * head of the LRU list, the index of the marker in the
+ * epoch_markers array is inserted at the tail of the ring
+ * buffer. When it becomes the epoch_markers_active'th marker
+ * in the LRU list, it will have worked its way to the head
+ * of the ring buffer as well. This allows us to remove it
+ * without scanning the LRU list if such is required.
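+ *
+ * For illustration only (not part of this change), the ring buffer
+ * updates amount to the usual wrap-around arithmetic on an array of
+ * length (H5C__MAX_EPOCH_MARKERS + 1):
+ *
+ *     enqueue marker index i at the tail:
+ *         last = (last + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
+ *         ringbuf[last] = i;
+ *         size++;
+ *
+ *     dequeue the oldest marker index from the head:
+ *         i = ringbuf[first];
+ *         first = (first + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
+ *         size--;
+ *
+ * where first, last, size, and ringbuf stand in for the
+ * epoch_marker_ringbuf_* fields below; the actual code in H5C.c may
+ * differ in initialization details.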
+ *
+ * epoch_marker_ringbuf_first: Integer field containing the index of the
+ * first entry in the ring buffer.
+ *
+ * epoch_marker_ringbuf_last: Integer field containing the index of the
+ * last entry in the ring buffer.
+ *
+ * epoch_marker_ringbuf_size: Integer field containing the number of entries
+ * in the ring buffer.
+ *
+ * epoch_markers: Array of instances of H5C_cache_entry_t of length
+ * H5C__MAX_EPOCH_MARKERS. The entries are used as markers
+ * in the LRU list to identify cache entries that haven't
+ * been accessed for some (small) specified number of
+ * epochs. These entries (if any) can then be evicted and
+ * the cache size reduced -- ideally without evicting any
+ * of the current working set. Needless to say, the epoch
+ * length and the number of epochs before an unused entry is
+ * evicted must be chosen so that all, or almost all, of the
+ * working set will be accessed before the limit is reached.
+ *
+ * Epoch markers only appear in the LRU list, never in
+ * the index or slist. While they are of type
+ * H5C__EPOCH_MARKER_TYPE, and have associated class
+ * functions, these functions should never be called.
+ *
+ * The addr fields of these instances of H5C_cache_entry_t
+ * are set to the index of the instance in the epoch_markers
+ * array, the size is set to 0, and the type field points
+ * to the constant structure epoch_marker_class defined
+ * in H5C.c. The next and prev fields are used as usual
+ * to link the entry into the LRU list.
+ *
+ * All other fields are unused.
+ *
+ *
+ * Cache hit rate collection fields:
+ *
+ * We supply the current cache hit rate on request, so we must keep a
+ * simple cache hit rate computation regardless of whether statistics
+ * collection is enabled. The following fields support this capability.
+ *
+ * cache_hits: Number of cache hits since the last time the cache hit
+ * rate statistics were reset. Note that when automatic cache
+ * re-sizing is enabled, this field will be reset every automatic
+ * resize epoch.
+ *
+ * cache_accesses: Number of times the cache has been accessed since the
+ * last time the cache hit rate statistics were reset. Note
+ * that when automatic cache re-sizing is enabled,
+ * this field will be reset every automatic resize epoch.
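+ *
+ * For illustration only (not part of this change), the hit rate
+ * reported to the user (see H5C_get_cache_hit_rate()) is essentially:
+ *
+ *     hit_rate = (cache_accesses > 0)
+ *              ? ((double)cache_hits / (double)cache_accesses)
+ *              : 0.0;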
+ *
+ *
+ * Statistics collection fields:
+ *
+ * When enabled, these fields are used to collect statistics as described
+ * below. The first set are collected only when H5C_COLLECT_CACHE_STATS
+ * is true.
+ *
+ * hits: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type id
+ * equal to the array index has been in cache when requested in
+ * the current epoch.
+ *
+ * misses: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type id
+ * equal to the array index has not been in cache when
+ * requested in the current epoch.
+ *
+ * write_protects: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
+ * cells are used to record the number of times an entry with
+ * type id equal to the array index has been write protected
+ * in the current epoch.
+ *
+ * Observe that (hits + misses) = (write_protects + read_protects).
+ *
+ * read_protects: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
+ * cells are used to record the number of times an entry with
+ * type id equal to the array index has been read protected in
+ * the current epoch.
+ *
+ * Observe that (hits + misses) = (write_protects + read_protects).
+ *
+ * max_read_protects: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * The cells are used to record the maximum number of
+ * simultaneous read protects on any entry with type id equal
+ * to the array index in the current epoch.
+ *
+ * insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type
+ * id equal to the array index has been inserted into the
+ * cache in the current epoch.
+ *
+ * pinned_insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * The cells are used to record the number of times an entry
+ * with type id equal to the array index has been inserted
+ * pinned into the cache in the current epoch.
+ *
+ * clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times a dirty entry with type
+ * id equal to the array index has been cleared in the current
+ * epoch.
+ *
+ * flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type id
+ * equal to the array index has been written to disk in the
+ * current epoch.
+ *
+ * evictions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type id
+ * equal to the array index has been evicted from the cache in
+ * the current epoch.
+ *
+ * moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type
+ * id equal to the array index has been moved in the current
+ * epoch.
+ *
+ * entry_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * The cells are used to record the number of times an entry
+ * with type id equal to the array index has been moved
+ * during its flush callback in the current epoch.
+ *
+ * cache_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * The cells are used to record the number of times an entry
+ * with type id equal to the array index has been moved
+ * during a cache flush in the current epoch.
+ *
+ * pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type
+ * id equal to the array index has been pinned in the current
+ * epoch.
+ *
+ * unpins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type
+ * id equal to the array index has been unpinned in the current
+ * epoch.
+ *
+ * dirty_pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type
+ * id equal to the array index has been marked dirty while pinned
+ * in the current epoch.
+ *
+ * pinned_flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
+ * cells are used to record the number of times an entry
+ * with type id equal to the array index has been flushed while
+ * pinned in the current epoch.
+ *
+ * pinned_clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
+ * cells are used to record the number of times an entry
+ * with type id equal to the array index has been cleared while
+ * pinned in the current epoch.
+ *
+ * size_increases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * The cells are used to record the number of times an entry
+ * with type id equal to the array index has increased in
+ * size in the current epoch.
+ *
+ * size_decreases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * The cells are used to record the number of times an entry
+ * with type id equal to the array index has decreased in
+ * size in the current epoch.
+ *
+ * entry_flush_size_changes: Array of int64 of length
+ * H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record
+ * the number of times an entry with type id equal to the
+ * array index has changed size while in its flush callback.
+ *
+ * cache_flush_size_changes: Array of int64 of length
+ * H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record
+ * the number of times an entry with type id equal to the
+ * array index has changed size during a cache flush.
+ *
+ * total_ht_insertions: Number of times entries have been inserted into the
+ * hash table in the current epoch.
+ *
+ * total_ht_deletions: Number of times entries have been deleted from the
+ * hash table in the current epoch.
+ *
+ * successful_ht_searches: int64 containing the total number of successful
+ * searches of the hash table in the current epoch.
+ *
+ * total_successful_ht_search_depth: int64 containing the total number of
+ * entries other than the targets examined in successful
+ * searches of the hash table in the current epoch.
+ *
+ * failed_ht_searches: int64 containing the total number of unsuccessful
+ * searches of the hash table in the current epoch.
+ *
+ * total_failed_ht_search_depth: int64 containing the total number of
+ * entries examined in unsuccessful searches of the hash
+ * table in the current epoch.
+ *
+ * max_index_len: Largest value attained by the index_len field in the
+ * current epoch.
+ *
+ * max_index_size: Largest value attained by the index_size field in the
+ * current epoch.
+ *
+ * max_clean_index_size: Largest value attained by the clean_index_size field
+ * in the current epoch.
+ *
+ * max_dirty_index_size: Largest value attained by the dirty_index_size field
+ * in the current epoch.
+ *
+ * max_slist_len: Largest value attained by the slist_len field in the
+ * current epoch.
+ *
+ * max_slist_size: Largest value attained by the slist_size field in the
+ * current epoch.
+ *
+ * max_pl_len: Largest value attained by the pl_len field in the
+ * current epoch.
+ *
+ * max_pl_size: Largest value attained by the pl_size field in the
+ * current epoch.
+ *
+ * max_pel_len: Largest value attained by the pel_len field in the
+ * current epoch.
+ *
+ * max_pel_size: Largest value attained by the pel_size field in the
+ * current epoch.
+ *
+ * calls_to_msic: Total number of calls to H5C_make_space_in_cache
+ *
+ * total_entries_skipped_in_msic: Number of clean entries skipped while
+ * enforcing the min_clean_fraction in H5C_make_space_in_cache().
+ *
+ * total_entries_scanned_in_msic: Number of entries scanned over while
+ * enforcing the min_clean_fraction in H5C_make_space_in_cache().
+ *
+ * max_entries_skipped_in_msic: Maximum number of clean entries skipped
+ * in any one call to H5C_make_space_in_cache().
+ *
+ * max_entries_scanned_in_msic: Maximum number of entries scanned over
+ * in any one call to H5C_make_space_in_cache().
+ *
+ * entries_scanned_to_make_space: Number of entries scanned only when looking
+ * for entries to evict in order to make space in cache.
+ *
+ * The remaining stats are collected only when both H5C_COLLECT_CACHE_STATS
+ * and H5C_COLLECT_CACHE_ENTRY_STATS are true.
+ *
+ * max_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the maximum number of times any single
+ * entry with type id equal to the array index has been
+ * accessed in the current epoch.
+ *
+ * min_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the minimum number of times any single
+ * entry with type id equal to the array index has been
+ * accessed in the current epoch.
+ *
+ * max_clears: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the maximum number of times any single
+ * entry with type id equal to the array index has been cleared
+ * in the current epoch.
+ *
+ * max_flushes: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the maximum number of times any single
+ * entry with type id equal to the array index has been
+ * flushed in the current epoch.
+ *
+ * max_size: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the maximum size of any single entry
+ * with type id equal to the array index that has resided in
+ * the cache in the current epoch.
+ *
+ * max_pins: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the maximum number of times that any single
+ * entry with type id equal to the array index has been
+ * marked as pinned in the cache in the current epoch.
+ *
+ *
+ * Fields supporting testing:
+ *
+ * prefix Array of char used to prefix debugging output. The
+ * field is intended to allow marking of output with the
+ * process's MPI rank.
+ *
+ * get_entry_ptr_from_addr_counter: Counter used to track the number of
+ * times the H5C_get_entry_ptr_from_addr() function has been
+ * called successfully. This field is only defined when
+ * NDEBUG is not #defined.
+ *
+ ****************************************************************************/
+struct H5C_t {
+ uint32_t magic;
+ hbool_t flush_in_progress;
+ FILE * trace_file_ptr;
+ void * aux_ptr;
+ int32_t max_type_id;
+ const char * (* type_name_table_ptr);
+ size_t max_cache_size;
+ size_t min_clean_size;
+ H5C_write_permitted_func_t check_write_permitted;
+ hbool_t write_permitted;
+ H5C_log_flush_func_t log_flush;
+ hbool_t evictions_enabled;
+
+ /* Fields for maintaining [hash table] index of entries */
+ int32_t index_len;
+ size_t index_size;
+ size_t clean_index_size;
+ size_t dirty_index_size;
+ H5C_cache_entry_t * (index[H5C__HASH_TABLE_LEN]);
+
+ /* Field to disable tag validation */
+ hbool_t ignore_tags;
+
+ int32_t slist_len;
+ size_t slist_size;
+ H5SL_t * slist_ptr;
+ int32_t num_last_entries;
+#if H5C_DO_SANITY_CHECKS
+ int64_t slist_len_increase;
+ int64_t slist_size_increase;
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ /* Fields for tracking protected entries */
+ int32_t pl_len;
+ size_t pl_size;
+ H5C_cache_entry_t * pl_head_ptr;
+ H5C_cache_entry_t * pl_tail_ptr;
+
+ /* Fields for tracking pinned entries */
+ int32_t pel_len;
+ size_t pel_size;
+ H5C_cache_entry_t * pel_head_ptr;
+ H5C_cache_entry_t * pel_tail_ptr;
+
+ /* Fields for complete LRU list of entries */
+ int32_t LRU_list_len;
+ size_t LRU_list_size;
+ H5C_cache_entry_t * LRU_head_ptr;
+ H5C_cache_entry_t * LRU_tail_ptr;
+
+ /* Fields for clean LRU list of entries */
+ int32_t cLRU_list_len;
+ size_t cLRU_list_size;
+ H5C_cache_entry_t * cLRU_head_ptr;
+ H5C_cache_entry_t * cLRU_tail_ptr;
+
+ /* Fields for dirty LRU list of entries */
+ int32_t dLRU_list_len;
+ size_t dLRU_list_size;
+ H5C_cache_entry_t * dLRU_head_ptr;
+ H5C_cache_entry_t * dLRU_tail_ptr;
+
+ /* Fields for automatic cache size adjustment */
+ hbool_t size_increase_possible;
+ hbool_t flash_size_increase_possible;
+ size_t flash_size_increase_threshold;
+ hbool_t size_decrease_possible;
+ hbool_t resize_enabled;
+ hbool_t cache_full;
+ hbool_t size_decreased;
+ H5C_auto_size_ctl_t resize_ctl;
+
+ /* Fields for epoch markers used in automatic cache size adjustment */
+ int32_t epoch_markers_active;
+ hbool_t epoch_marker_active[H5C__MAX_EPOCH_MARKERS];
+ int32_t epoch_marker_ringbuf[H5C__MAX_EPOCH_MARKERS+1];
+ int32_t epoch_marker_ringbuf_first;
+ int32_t epoch_marker_ringbuf_last;
+ int32_t epoch_marker_ringbuf_size;
+ H5C_cache_entry_t epoch_markers[H5C__MAX_EPOCH_MARKERS];
+
+ /* Fields for cache hit rate collection */
+ int64_t cache_hits;
+ int64_t cache_accesses;
+
+#if H5C_COLLECT_CACHE_STATS
+ /* stats fields */
+ int64_t hits[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t misses[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t write_protects[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t read_protects[H5C__MAX_NUM_TYPE_IDS + 1];
+ int32_t max_read_protects[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t insertions[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t pinned_insertions[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t clears[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t flushes[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t evictions[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t moves[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t entry_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t cache_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t pins[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t unpins[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t dirty_pins[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t pinned_flushes[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t pinned_clears[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t size_increases[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t size_decreases[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t entry_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t cache_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1];
+
+ /* Fields for hash table operations */
+ int64_t total_ht_insertions;
+ int64_t total_ht_deletions;
+ int64_t successful_ht_searches;
+ int64_t total_successful_ht_search_depth;
+ int64_t failed_ht_searches;
+ int64_t total_failed_ht_search_depth;
+ int32_t max_index_len;
+ size_t max_index_size;
+ size_t max_clean_index_size;
+ size_t max_dirty_index_size;
+
+ /* Fields for in-order skip list */
+ int32_t max_slist_len;
+ size_t max_slist_size;
+
+ /* Fields for protected entry list */
+ int32_t max_pl_len;
+ size_t max_pl_size;
+
+ /* Fields for pinned entry list */
+ int32_t max_pel_len;
+ size_t max_pel_size;
+
+ /* Fields for tracking 'make space in cache' (msic) operations */
+ int64_t calls_to_msic;
+ int64_t total_entries_skipped_in_msic;
+ int64_t total_entries_scanned_in_msic;
+ int32_t max_entries_skipped_in_msic;
+ int32_t max_entries_scanned_in_msic;
+ int64_t entries_scanned_to_make_space;
+
+#if H5C_COLLECT_CACHE_ENTRY_STATS
+ int32_t max_accesses[H5C__MAX_NUM_TYPE_IDS + 1];
+ int32_t min_accesses[H5C__MAX_NUM_TYPE_IDS + 1];
+ int32_t max_clears[H5C__MAX_NUM_TYPE_IDS + 1];
+ int32_t max_flushes[H5C__MAX_NUM_TYPE_IDS + 1];
+ size_t max_size[H5C__MAX_NUM_TYPE_IDS + 1];
+ int32_t max_pins[H5C__MAX_NUM_TYPE_IDS + 1];
+#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ char prefix[H5C__PREFIX_LEN];
+
+#ifndef NDEBUG
+ int64_t get_entry_ptr_from_addr_counter;
+#endif /* NDEBUG */
+};
+
+/*****************************/
+/* Package Private Variables */
+/*****************************/
+
+
+/******************************/
+/* Package Private Prototypes */
+/******************************/
+
+
#endif /* _H5Cpkg_H */
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index c9679f4..3413381 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -36,6 +36,9 @@
#include "H5private.h" /* Generic Functions */
#include "H5Fprivate.h" /* File access */
+/**************************/
+/* Library Private Macros */
+/**************************/
#define H5C_DO_SANITY_CHECKS 0
#define H5C_DO_TAGGING_SANITY_CHECKS 1
@@ -61,36 +64,162 @@
* H5C_COLLECT_CACHE_STATS is also defined to true.
*/
#if H5C_COLLECT_CACHE_STATS
-
#define H5C_COLLECT_CACHE_ENTRY_STATS 1
-
#else
-
#define H5C_COLLECT_CACHE_ENTRY_STATS 0
-
#endif /* H5C_COLLECT_CACHE_STATS */
-
#ifdef H5_HAVE_PARALLEL
-
/* we must maintain the clean and dirty LRU lists when we are compiled
* with parallel support.
*/
#define H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS 1
-
#else /* H5_HAVE_PARALLEL */
-
/* The clean and dirty LRU lists don't buy us anything here -- we may
* want them on for testing on occasion, but in general they should be
* off.
*/
#define H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS 0
-
#endif /* H5_HAVE_PARALLEL */
+/* Flags returned from the 'flush' callback */
+#define H5C_CALLBACK__NO_FLAGS_SET 0x0
+#define H5C_CALLBACK__SIZE_CHANGED_FLAG 0x1
+#define H5C_CALLBACK__MOVED_FLAG 0x2
-/* Typedef for the main structure for the cache (defined in H5Cpkg.h) */
+/* Upper and lower limits on cache size. These limits are picked
+ * out of a hat -- you should be able to change them as necessary.
+ *
+ * However, if you need a very big cache, you should also increase the
+ * size of the hash table (H5C__HASH_TABLE_LEN in H5Cpkg.h). The current
+ * upper bound on cache size is rather large for the current hash table
+ * size.
+ */
+#define H5C__MAX_MAX_CACHE_SIZE ((size_t)(128 * 1024 * 1024))
+#define H5C__MIN_MAX_CACHE_SIZE ((size_t)(1024))
+
+/* Default max cache size and min clean size are given here to make
+ * them generally accessible.
+ */
+#define H5C__DEFAULT_MAX_CACHE_SIZE ((size_t)(4 * 1024 * 1024))
+#define H5C__DEFAULT_MIN_CLEAN_SIZE ((size_t)(2 * 1024 * 1024))
+
+/* Maximum height of flush dependency relationships between entries. This is
+ * currently tuned to the extensible array (H5EA) data structure, which only
+ * requires 6 levels of dependency (i.e. heights 0-6) (actually, the extensible
+ * array needs 4 levels, plus another 2 levels are needed: one for the layer
+ * under the extensible array and one for the layer above it).
+ */
+#define H5C__NUM_FLUSH_DEP_HEIGHTS 6
+
+#ifndef NDEBUG
+/* Values for cache entry magic field */
+#define H5C__H5C_CACHE_ENTRY_T_MAGIC 0x005CAC0A
+#define H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC 0xDeadBeef
+#endif /* NDEBUG */
+
+/* Cache configuration validation definitions */
+#define H5C_RESIZE_CFG__VALIDATE_GENERAL 0x1
+#define H5C_RESIZE_CFG__VALIDATE_INCREMENT 0x2
+#define H5C_RESIZE_CFG__VALIDATE_DECREMENT 0x4
+#define H5C_RESIZE_CFG__VALIDATE_INTERACTIONS 0x8
+#define H5C_RESIZE_CFG__VALIDATE_ALL \
+( \
+ H5C_RESIZE_CFG__VALIDATE_GENERAL | \
+ H5C_RESIZE_CFG__VALIDATE_INCREMENT | \
+ H5C_RESIZE_CFG__VALIDATE_DECREMENT | \
+ H5C_RESIZE_CFG__VALIDATE_INTERACTIONS \
+)
+
+/* Cache configuration versions */
+#define H5C__CURR_AUTO_SIZE_CTL_VER 1
+#define H5C__CURR_AUTO_RESIZE_RPT_FCN_VER 1
+
+/* Number of epoch markers active */
+#define H5C__MAX_EPOCH_MARKERS 10
+
+/* Default configuration settings */
+#define H5C__DEF_AR_UPPER_THRESHHOLD 0.9999f
+#define H5C__DEF_AR_LOWER_THRESHHOLD 0.9f
+#define H5C__DEF_AR_MAX_SIZE ((size_t)(16 * 1024 * 1024))
+#define H5C__DEF_AR_INIT_SIZE ((size_t)( 1 * 1024 * 1024))
+#define H5C__DEF_AR_MIN_SIZE ((size_t)( 1 * 1024 * 1024))
+#define H5C__DEF_AR_MIN_CLEAN_FRAC 0.5f
+#define H5C__DEF_AR_INCREMENT 2.0f
+#define H5C__DEF_AR_MAX_INCREMENT ((size_t)( 2 * 1024 * 1024))
+#define H5C__DEF_AR_FLASH_MULTIPLE 1.0f
+#define H5C__DEV_AR_FLASH_THRESHOLD 0.25f
+#define H5C__DEF_AR_DECREMENT 0.9f
+#define H5C__DEF_AR_MAX_DECREMENT ((size_t)( 1 * 1024 * 1024))
+#define H5C__DEF_AR_EPCHS_B4_EVICT 3
+#define H5C__DEF_AR_EMPTY_RESERVE 0.05f
+#define H5C__MIN_AR_EPOCH_LENGTH 100
+#define H5C__DEF_AR_EPOCH_LENGTH 50000
+#define H5C__MAX_AR_EPOCH_LENGTH 1000000
+
+/* #defines of flags used in the flags parameters in some of the
+ * cache calls. Note that not all flags are applicable
+ * to all function calls. Flags that don't apply to a particular
+ * function are ignored in that function.
+ *
+ * These flags apply to all function calls:
+ * H5C__NO_FLAGS_SET (generic "no flags set" for all fcn calls)
+ *
+ *
+ * These flags apply to H5C_insert_entry():
+ * H5C__SET_FLUSH_MARKER_FLAG
+ * H5C__PIN_ENTRY_FLAG
+ *
+ * These flags apply to H5C_protect()
+ * H5C__READ_ONLY_FLAG
+ *
+ * These flags apply to H5C_unprotect():
+ * H5C__SET_FLUSH_MARKER_FLAG
+ * H5C__DELETED_FLAG
+ * H5C__DIRTIED_FLAG
+ * H5C__PIN_ENTRY_FLAG
+ * H5C__UNPIN_ENTRY_FLAG
+ * H5C__FREE_FILE_SPACE_FLAG
+ * H5C__TAKE_OWNERSHIP_FLAG
+ *
+ * These flags apply to H5C_expunge_entry():
+ * H5C__FREE_FILE_SPACE_FLAG
+ *
+ * These flags apply to H5C_flush_cache():
+ * H5C__FLUSH_INVALIDATE_FLAG
+ * H5C__FLUSH_CLEAR_ONLY_FLAG
+ * H5C__FLUSH_MARKED_ENTRIES_FLAG
+ * H5C__FLUSH_IGNORE_PROTECTED_FLAG (can't use this flag in combination
+ * with H5C__FLUSH_INVALIDATE_FLAG)
+ *
+ * These flags apply to H5C_flush_single_entry():
+ * H5C__FLUSH_INVALIDATE_FLAG
+ * H5C__FLUSH_CLEAR_ONLY_FLAG
+ * H5C__FLUSH_MARKED_ENTRIES_FLAG
+ * H5C__TAKE_OWNERSHIP_FLAG
+ */
+#define H5C__NO_FLAGS_SET 0x0000
+#define H5C__SET_FLUSH_MARKER_FLAG 0x0001
+#define H5C__DELETED_FLAG 0x0002
+#define H5C__DIRTIED_FLAG 0x0004
+#define H5C__PIN_ENTRY_FLAG 0x0008
+#define H5C__UNPIN_ENTRY_FLAG 0x0010
+#define H5C__FLUSH_INVALIDATE_FLAG 0x0020
+#define H5C__FLUSH_CLEAR_ONLY_FLAG 0x0040
+#define H5C__FLUSH_MARKED_ENTRIES_FLAG 0x0080
+#define H5C__FLUSH_IGNORE_PROTECTED_FLAG 0x0100
+#define H5C__READ_ONLY_FLAG 0x0200
+#define H5C__FREE_FILE_SPACE_FLAG 0x0800
+#define H5C__TAKE_OWNERSHIP_FLAG 0x1000
+#define H5C__FLUSH_LAST_FLAG 0x2000
+#define H5C__FLUSH_COLLECTIVELY_FLAG 0x4000
+
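+/* For illustration only (not part of this change): flags from the list
+ * above are OR'd together and passed in the flags parameter of the
+ * relevant call.  For example, a client that dirtied an entry while it
+ * was protected might unprotect it with a (hypothetical) call along
+ * the lines of:
+ *
+ *     if(H5C_unprotect(f, dxpl_id, dxpl_id, type, addr, thing,
+ *             H5C__DIRTIED_FLAG | H5C__SET_FLUSH_MARKER_FLAG) < 0)
+ *         HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "unprotect failed")
+ *
+ * where f, dxpl_id, type, addr, and thing are assumed to be in scope.
+ */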
+
+/****************************/
+/* Library Private Typedefs */
+/****************************/
+/* Typedef for the main structure for the cache (defined in H5Cpkg.h) */
typedef struct H5C_t H5C_t;
@@ -121,10 +250,6 @@ typedef struct H5C_t H5C_t;
* Note that the space allocated on disk may not be contiguous.
*/
-#define H5C_CALLBACK__NO_FLAGS_SET 0x0
-#define H5C_CALLBACK__SIZE_CHANGED_FLAG 0x1
-#define H5C_CALLBACK__MOVED_FLAG 0x2
-
/* Actions that can be reported to 'notify' client callback */
typedef enum H5C_notify_action_t {
H5C_NOTIFY_ACTION_AFTER_INSERT, /* Entry has been added to the cache */
@@ -135,27 +260,18 @@ typedef enum H5C_notify_action_t {
H5C_NOTIFY_ACTION_BEFORE_EVICT /* Entry is about to be evicted from cache */
} H5C_notify_action_t;
-typedef void *(*H5C_load_func_t)(H5F_t *f,
- hid_t dxpl_id,
- haddr_t addr,
- void *udata);
-typedef herr_t (*H5C_flush_func_t)(H5F_t *f,
- hid_t dxpl_id,
- hbool_t dest,
- haddr_t addr,
- void *thing,
- unsigned * flags_ptr);
-typedef herr_t (*H5C_dest_func_t)(H5F_t *f,
- void *thing);
-typedef herr_t (*H5C_clear_func_t)(H5F_t *f,
- void *thing,
- hbool_t dest);
-typedef herr_t (*H5C_notify_func_t)(H5C_notify_action_t action,
- void *thing);
-typedef herr_t (*H5C_size_func_t)(const H5F_t *f,
- const void *thing,
- size_t *size_ptr);
-
+/* Cache client callback function pointers */
+typedef void *(*H5C_load_func_t)(H5F_t *f, hid_t dxpl_id, haddr_t addr,
+ void *udata);
+typedef herr_t (*H5C_flush_func_t)(H5F_t *f, hid_t dxpl_id, hbool_t dest,
+ haddr_t addr, void *thing, unsigned *flags_ptr);
+typedef herr_t (*H5C_dest_func_t)(H5F_t *f, void *thing);
+typedef herr_t (*H5C_clear_func_t)(H5F_t *f, void *thing, hbool_t dest);
+typedef herr_t (*H5C_notify_func_t)(H5C_notify_action_t action, void *thing);
+typedef herr_t (*H5C_size_func_t)(const H5F_t *f, const void *thing,
+ size_t *size_ptr);
+
+/* Metadata cache client class definition */
typedef struct H5C_class_t {
int id;
H5C_load_func_t load;
@@ -166,49 +282,11 @@ typedef struct H5C_class_t {
H5C_size_func_t size;
} H5C_class_t;
-
-/* Type defintions of call back functions used by the cache as a whole */
-
+/* Type definitions of callback functions used by the cache as a whole */
typedef herr_t (*H5C_write_permitted_func_t)(const H5F_t *f,
- hid_t dxpl_id,
- hbool_t * write_permitted_ptr);
-
-typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
- haddr_t addr,
- hbool_t was_dirty,
- unsigned flags,
- int type_id);
-
-/* Upper and lower limits on cache size. These limits are picked
- * out of a hat -- you should be able to change them as necessary.
- *
- * However, if you need a very big cache, you should also increase the
- * size of the hash table (H5C__HASH_TABLE_LEN in H5Cpkg.h). The current
- * upper bound on cache size is rather large for the current hash table
- * size.
- */
-
-#define H5C__MAX_MAX_CACHE_SIZE ((size_t)(128 * 1024 * 1024))
-#define H5C__MIN_MAX_CACHE_SIZE ((size_t)(1024))
-
-
-/* Default max cache size and min clean size are give here to make
- * them generally accessable.
- */
-
-#define H5C__DEFAULT_MAX_CACHE_SIZE ((size_t)(4 * 1024 * 1024))
-#define H5C__DEFAULT_MIN_CLEAN_SIZE ((size_t)(2 * 1024 * 1024))
-
-/* Maximum height of flush dependency relationships between entries. This is
- * currently tuned to the extensible array (H5EA) data structure, which only
- * requires 6 levels of dependency (i.e. heights 0-6) (actually, the extensible
- * array needs 4 levels, plus another 2 levels are needed: one for the layer
- * under the extensible array and one for the layer above it).
- */
-
-#define H5C__NUM_FLUSH_DEP_HEIGHTS 6
-
-
+ hbool_t *write_permitted_ptr);
+typedef herr_t (*H5C_log_flush_func_t)(H5C_t *cache_ptr, haddr_t addr,
+ hbool_t was_dirty, unsigned flags);
/****************************************************************************
*
@@ -580,21 +658,14 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
* been pinned in cache in its life time.
*
****************************************************************************/
-
-#ifndef NDEBUG
-#define H5C__H5C_CACHE_ENTRY_T_MAGIC 0x005CAC0A
-#define H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC 0xDeadBeef
-#endif /* NDEBUG */
-
-typedef struct H5C_cache_entry_t
-{
+typedef struct H5C_cache_entry_t {
#ifndef NDEBUG
uint32_t magic;
#endif /* NDEBUG */
- H5C_t * cache_ptr;
+ H5C_t * cache_ptr;
haddr_t addr;
size_t size;
- const H5C_class_t * type;
+ const H5C_class_t * type;
haddr_t tag;
hbool_t is_dirty;
hbool_t dirtied;
@@ -615,39 +686,31 @@ typedef struct H5C_cache_entry_t
hbool_t free_file_space_on_destroy;
/* fields supporting the 'flush dependency' feature: */
-
- struct H5C_cache_entry_t * flush_dep_parent;
+ struct H5C_cache_entry_t * flush_dep_parent;
uint64_t child_flush_dep_height_rc[H5C__NUM_FLUSH_DEP_HEIGHTS];
unsigned flush_dep_height;
hbool_t pinned_from_client;
hbool_t pinned_from_cache;
/* fields supporting the hash table: */
-
- struct H5C_cache_entry_t * ht_next;
- struct H5C_cache_entry_t * ht_prev;
+ struct H5C_cache_entry_t * ht_next;
+ struct H5C_cache_entry_t * ht_prev;
/* fields supporting replacement policies: */
-
- struct H5C_cache_entry_t * next;
- struct H5C_cache_entry_t * prev;
- struct H5C_cache_entry_t * aux_next;
- struct H5C_cache_entry_t * aux_prev;
+ struct H5C_cache_entry_t * next;
+ struct H5C_cache_entry_t * prev;
+ struct H5C_cache_entry_t * aux_next;
+ struct H5C_cache_entry_t * aux_prev;
#if H5C_COLLECT_CACHE_ENTRY_STATS
-
/* cache entry stats fields */
-
int32_t accesses;
int32_t clears;
int32_t flushes;
int32_t pins;
-
#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
-
} H5C_cache_entry_t;
-
/****************************************************************************
*
* structure H5C_auto_size_ctl_t
@@ -888,41 +951,6 @@ typedef struct H5C_cache_entry_t
*
****************************************************************************/
-#define H5C_RESIZE_CFG__VALIDATE_GENERAL 0x1
-#define H5C_RESIZE_CFG__VALIDATE_INCREMENT 0x2
-#define H5C_RESIZE_CFG__VALIDATE_DECREMENT 0x4
-#define H5C_RESIZE_CFG__VALIDATE_INTERACTIONS 0x8
-#define H5C_RESIZE_CFG__VALIDATE_ALL \
-( \
- H5C_RESIZE_CFG__VALIDATE_GENERAL | \
- H5C_RESIZE_CFG__VALIDATE_INCREMENT | \
- H5C_RESIZE_CFG__VALIDATE_DECREMENT | \
- H5C_RESIZE_CFG__VALIDATE_INTERACTIONS \
-)
-
-#define H5C__CURR_AUTO_SIZE_CTL_VER 1
-#define H5C__CURR_AUTO_RESIZE_RPT_FCN_VER 1
-
-#define H5C__MAX_EPOCH_MARKERS 10
-
-#define H5C__DEF_AR_UPPER_THRESHHOLD 0.9999f
-#define H5C__DEF_AR_LOWER_THRESHHOLD 0.9f
-#define H5C__DEF_AR_MAX_SIZE ((size_t)(16 * 1024 * 1024))
-#define H5C__DEF_AR_INIT_SIZE ((size_t)( 1 * 1024 * 1024))
-#define H5C__DEF_AR_MIN_SIZE ((size_t)( 1 * 1024 * 1024))
-#define H5C__DEF_AR_MIN_CLEAN_FRAC 0.5f
-#define H5C__DEF_AR_INCREMENT 2.0f
-#define H5C__DEF_AR_MAX_INCREMENT ((size_t)( 2 * 1024 * 1024))
-#define H5C__DEF_AR_FLASH_MULTIPLE 1.0f
-#define H5C__DEV_AR_FLASH_THRESHOLD 0.25f
-#define H5C__DEF_AR_DECREMENT 0.9f
-#define H5C__DEF_AR_MAX_DECREMENT ((size_t)( 1 * 1024 * 1024))
-#define H5C__DEF_AR_EPCHS_B4_EVICT 3
-#define H5C__DEF_AR_EMPTY_RESERVE 0.05f
-#define H5C__MIN_AR_EPOCH_LENGTH 100
-#define H5C__DEF_AR_EPOCH_LENGTH 50000
-#define H5C__MAX_AR_EPOCH_LENGTH 1000000
-
enum H5C_resize_status
{
in_spec,
@@ -936,303 +964,118 @@ enum H5C_resize_status
not_full
}; /* enum H5C_resize_conditions */
-typedef void (*H5C_auto_resize_rpt_fcn)(H5C_t * cache_ptr,
- int32_t version,
- double hit_rate,
- enum H5C_resize_status status,
- size_t old_max_cache_size,
- size_t new_max_cache_size,
- size_t old_min_clean_size,
- size_t new_min_clean_size);
+typedef void (*H5C_auto_resize_rpt_fcn)(H5C_t * cache_ptr, int32_t version,
+ double hit_rate, enum H5C_resize_status status, size_t old_max_cache_size,
+ size_t new_max_cache_size, size_t old_min_clean_size, size_t new_min_clean_size);
-typedef struct H5C_auto_size_ctl_t
-{
+typedef struct H5C_auto_size_ctl_t {
/* general configuration fields: */
int32_t version;
H5C_auto_resize_rpt_fcn rpt_fcn;
-
hbool_t set_initial_size;
size_t initial_size;
-
double min_clean_fraction;
-
size_t max_size;
size_t min_size;
-
int64_t epoch_length;
-
/* size increase control fields: */
enum H5C_cache_incr_mode incr_mode;
-
double lower_hr_threshold;
-
double increment;
-
hbool_t apply_max_increment;
size_t max_increment;
-
enum H5C_cache_flash_incr_mode flash_incr_mode;
double flash_multiple;
double flash_threshold;
-
/* size decrease control fields: */
enum H5C_cache_decr_mode decr_mode;
-
double upper_hr_threshold;
-
double decrement;
-
hbool_t apply_max_decrement;
size_t max_decrement;
-
int32_t epochs_before_eviction;
-
hbool_t apply_empty_reserve;
double empty_reserve;
-
} H5C_auto_size_ctl_t;
-
-/*
- * Library prototypes.
- */
-
-/* #defines of flags used in the flags parameters in some of the
- * following function calls. Note that not all flags are applicable
- * to all function calls. Flags that don't apply to a particular
- * function are ignored in that function.
- *
- * These flags apply to all function calls:
- *
- * H5C__NO_FLAGS_SET (generic "no flags set" for all fcn calls)
- *
- *
- * These flags apply to H5C_insert_entry():
- *
- * H5C__SET_FLUSH_MARKER_FLAG
- * H5C__PIN_ENTRY_FLAG
- *
- * These flags apply to H5C_protect()
- *
- * H5C__READ_ONLY_FLAG
- *
- * These flags apply to H5C_unprotect():
- *
- * H5C__SET_FLUSH_MARKER_FLAG
- * H5C__DELETED_FLAG
- * H5C__DIRTIED_FLAG
- * H5C__PIN_ENTRY_FLAG
- * H5C__UNPIN_ENTRY_FLAG
- * H5C__FREE_FILE_SPACE_FLAG
- * H5C__TAKE_OWNERSHIP_FLAG
- *
- * These flags apply to H5C_expunge_entry():
- *
- * H5C__FREE_FILE_SPACE_FLAG
- *
- * These flags apply to H5C_flush_cache():
- *
- * H5C__FLUSH_INVALIDATE_FLAG
- * H5C__FLUSH_CLEAR_ONLY_FLAG
- * H5C__FLUSH_MARKED_ENTRIES_FLAG
- * H5C__FLUSH_IGNORE_PROTECTED_FLAG (can't use this flag in combination
- * with H5C__FLUSH_INVALIDATE_FLAG)
- *
- * These flags apply to H5C_flush_single_entry():
- *
- * H5C__FLUSH_INVALIDATE_FLAG
- * H5C__FLUSH_CLEAR_ONLY_FLAG
- * H5C__FLUSH_MARKED_ENTRIES_FLAG
- * H5C__TAKE_OWNERSHIP_FLAG
- */
-
-#define H5C__NO_FLAGS_SET 0x0000
-#define H5C__SET_FLUSH_MARKER_FLAG 0x0001
-#define H5C__DELETED_FLAG 0x0002
-#define H5C__DIRTIED_FLAG 0x0004
-#define H5C__PIN_ENTRY_FLAG 0x0008
-#define H5C__UNPIN_ENTRY_FLAG 0x0010
-#define H5C__FLUSH_INVALIDATE_FLAG 0x0020
-#define H5C__FLUSH_CLEAR_ONLY_FLAG 0x0040
-#define H5C__FLUSH_MARKED_ENTRIES_FLAG 0x0080
-#define H5C__FLUSH_IGNORE_PROTECTED_FLAG 0x0100
-#define H5C__READ_ONLY_FLAG 0x0200
-#define H5C__FREE_FILE_SPACE_FLAG 0x0800
-#define H5C__TAKE_OWNERSHIP_FLAG 0x1000
-#define H5C__FLUSH_LAST_FLAG 0x2000
-#define H5C__FLUSH_COLLECTIVELY_FLAG 0x4000
-
-#ifdef H5_HAVE_PARALLEL
-H5_DLL herr_t H5C_apply_candidate_list(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- H5C_t * cache_ptr,
- int num_candidates,
- haddr_t * candidates_list_ptr,
- int mpi_rank,
- int mpi_size);
-
-H5_DLL herr_t H5C_construct_candidate_list__clean_cache(H5C_t * cache_ptr);
-
-H5_DLL herr_t H5C_construct_candidate_list__min_clean(H5C_t * cache_ptr);
-#endif /* H5_HAVE_PARALLEL */
-
-H5_DLL H5C_t * H5C_create(size_t max_cache_size,
- size_t min_clean_size,
- int max_type_id,
- const char * (* type_name_table_ptr),
- H5C_write_permitted_func_t check_write_permitted,
- hbool_t write_permitted,
- H5C_log_flush_func_t log_flush,
- void * aux_ptr);
-
-H5_DLL void H5C_def_auto_resize_rpt_fcn(H5C_t * cache_ptr,
- int32_t version,
- double hit_rate,
- enum H5C_resize_status status,
- size_t old_max_cache_size,
- size_t new_max_cache_size,
- size_t old_min_clean_size,
- size_t new_min_clean_size);
-
-H5_DLL herr_t H5C_dest(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id);
-
-H5_DLL herr_t H5C_expunge_entry(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- const H5C_class_t * type,
- haddr_t addr,
- unsigned flags);
-
-H5_DLL herr_t H5C_flush_cache(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- unsigned flags);
-
-H5_DLL herr_t H5C_flush_to_min_clean(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id);
-
-H5_DLL herr_t H5C_get_cache_auto_resize_config(const H5C_t * cache_ptr,
- H5C_auto_size_ctl_t *config_ptr);
-
-H5_DLL herr_t H5C_get_cache_size(H5C_t * cache_ptr,
- size_t * max_size_ptr,
- size_t * min_clean_size_ptr,
- size_t * cur_size_ptr,
- int32_t * cur_num_entries_ptr);
-
-H5_DLL herr_t H5C_get_cache_hit_rate(H5C_t * cache_ptr,
- double * hit_rate_ptr);
-
-H5_DLL herr_t H5C_get_entry_status(const H5F_t *f,
- haddr_t addr,
- size_t * size_ptr,
- hbool_t * in_cache_ptr,
- hbool_t * is_dirty_ptr,
- hbool_t * is_protected_ptr,
- hbool_t * is_pinned_ptr,
- hbool_t * is_flush_dep_parent_ptr,
- hbool_t * is_flush_dep_child_ptr);
-
-H5_DLL herr_t H5C_get_evictions_enabled(const H5C_t * cache_ptr,
- hbool_t * evictions_enabled_ptr);
-
-H5_DLL herr_t H5C_get_trace_file_ptr(const H5C_t *cache_ptr,
- FILE **trace_file_ptr_ptr);
-H5_DLL herr_t H5C_get_trace_file_ptr_from_entry(const H5C_cache_entry_t *entry_ptr,
- FILE **trace_file_ptr_ptr);
-
-H5_DLL herr_t H5C_insert_entry(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- const H5C_class_t * type,
- haddr_t addr,
- void * thing,
- unsigned int flags);
-
-H5_DLL herr_t H5C_mark_entries_as_clean(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- int32_t ce_array_len,
- haddr_t *ce_array_ptr);
-
+/***************************************/
+/* Library-private Function Prototypes */
+/***************************************/
+
+H5_DLL H5C_t *H5C_create(size_t max_cache_size, size_t min_clean_size,
+ int max_type_id, const char *(*type_name_table_ptr),
+ H5C_write_permitted_func_t check_write_permitted, hbool_t write_permitted,
+ H5C_log_flush_func_t log_flush, void *aux_ptr);
+H5_DLL void H5C_def_auto_resize_rpt_fcn(H5C_t *cache_ptr, int32_t version,
+ double hit_rate, enum H5C_resize_status status,
+ size_t old_max_cache_size, size_t new_max_cache_size,
+ size_t old_min_clean_size, size_t new_min_clean_size);
+H5_DLL herr_t H5C_dest(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id);
+H5_DLL herr_t H5C_expunge_entry(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id,
+ const H5C_class_t *type, haddr_t addr, unsigned flags);
+H5_DLL herr_t H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id,
+ unsigned flags);
+H5_DLL herr_t H5C_flush_to_min_clean(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id);
+H5_DLL herr_t H5C_get_cache_auto_resize_config(const H5C_t *cache_ptr,
+ H5C_auto_size_ctl_t *config_ptr);
+H5_DLL herr_t H5C_get_cache_size(H5C_t *cache_ptr, size_t *max_size_ptr,
+ size_t *min_clean_size_ptr, size_t *cur_size_ptr,
+ int32_t *cur_num_entries_ptr);
+H5_DLL herr_t H5C_get_cache_hit_rate(H5C_t *cache_ptr, double *hit_rate_ptr);
+H5_DLL herr_t H5C_get_entry_status(const H5F_t *f, haddr_t addr,
+ size_t *size_ptr, hbool_t *in_cache_ptr, hbool_t *is_dirty_ptr,
+ hbool_t *is_protected_ptr, hbool_t *is_pinned_ptr,
+ hbool_t *is_flush_dep_parent_ptr, hbool_t *is_flush_dep_child_ptr);
+H5_DLL herr_t H5C_get_evictions_enabled(const H5C_t *cache_ptr, hbool_t *evictions_enabled_ptr);
+H5_DLL FILE *H5C_get_trace_file_ptr(const H5C_t *cache_ptr);
+H5_DLL FILE *H5C_get_trace_file_ptr_from_entry(const H5C_cache_entry_t *entry_ptr);
+H5_DLL herr_t H5C_insert_entry(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id,
+ const H5C_class_t *type, haddr_t addr, void *thing, unsigned int flags);
+H5_DLL herr_t H5C_mark_entries_as_clean(H5F_t *f, hid_t primary_dxpl_id,
+ hid_t secondary_dxpl_id, int32_t ce_array_len, haddr_t *ce_array_ptr);
H5_DLL herr_t H5C_mark_entry_dirty(void *thing);
-
-H5_DLL herr_t H5C_move_entry(H5C_t * cache_ptr,
- const H5C_class_t * type,
- haddr_t old_addr,
- haddr_t new_addr);
-
+H5_DLL herr_t H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type,
+ haddr_t old_addr, haddr_t new_addr);
H5_DLL herr_t H5C_pin_protected_entry(void *thing);
-
H5_DLL herr_t H5C_create_flush_dependency(void *parent_thing, void *child_thing);
-
-H5_DLL void * H5C_protect(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- const H5C_class_t * type,
- haddr_t addr,
- void * udata,
- unsigned flags);
-
-H5_DLL herr_t H5C_reset_cache_hit_rate_stats(H5C_t * cache_ptr);
-
+H5_DLL void * H5C_protect(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id,
+ const H5C_class_t *type, haddr_t addr, void *udata, unsigned flags);
+H5_DLL herr_t H5C_reset_cache_hit_rate_stats(H5C_t *cache_ptr);
H5_DLL herr_t H5C_resize_entry(void *thing, size_t new_size);
-
H5_DLL herr_t H5C_set_cache_auto_resize_config(H5C_t *cache_ptr,
- H5C_auto_size_ctl_t *config_ptr);
-
-H5_DLL herr_t H5C_set_evictions_enabled(H5C_t *cache_ptr,
- hbool_t evictions_enabled);
-
-H5_DLL herr_t H5C_set_prefix(H5C_t * cache_ptr, char * prefix);
-
-H5_DLL herr_t H5C_set_trace_file_ptr(H5C_t * cache_ptr,
- FILE * trace_file_ptr);
-
-H5_DLL herr_t H5C_stats(H5C_t * cache_ptr,
- const char * cache_name,
- hbool_t display_detailed_stats);
-
-H5_DLL void H5C_stats__reset(H5C_t * cache_ptr);
-
-H5_DLL herr_t H5C_dump_cache(H5C_t * cache_ptr,
- const char * cache_name);
-
+ H5C_auto_size_ctl_t *config_ptr);
+H5_DLL herr_t H5C_set_evictions_enabled(H5C_t *cache_ptr, hbool_t evictions_enabled);
+H5_DLL herr_t H5C_set_prefix(H5C_t *cache_ptr, char *prefix);
+H5_DLL herr_t H5C_set_trace_file_ptr(H5C_t *cache_ptr, FILE *trace_file_ptr);
+H5_DLL herr_t H5C_stats(H5C_t *cache_ptr, const char *cache_name,
+ hbool_t display_detailed_stats);
+H5_DLL void H5C_stats__reset(H5C_t *cache_ptr);
+H5_DLL herr_t H5C_dump_cache(H5C_t *cache_ptr, const char *cache_name);
H5_DLL herr_t H5C_unpin_entry(void *thing);
-
H5_DLL herr_t H5C_destroy_flush_dependency(void *parent_thing, void *child_thing);
+H5_DLL herr_t H5C_unprotect(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id,
+ const H5C_class_t *type, haddr_t addr, void *thing, unsigned int flags);
+H5_DLL herr_t H5C_validate_resize_config(H5C_auto_size_ctl_t *config_ptr,
+ unsigned int tests);
+H5_DLL herr_t H5C_ignore_tags(H5C_t *cache_ptr);
+H5_DLL void H5C_retag_copied_metadata(H5C_t *cache_ptr, haddr_t metadata_tag);
-H5_DLL herr_t H5C_unprotect(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- const H5C_class_t * type,
- haddr_t addr,
- void * thing,
- unsigned int flags);
-
-H5_DLL herr_t H5C_validate_resize_config(H5C_auto_size_ctl_t * config_ptr,
- unsigned int tests);
-
-H5_DLL herr_t H5C_ignore_tags(H5C_t * cache_ptr);
-
-H5_DLL void H5C_retag_copied_metadata(H5C_t * cache_ptr, haddr_t metadata_tag);
+#ifdef H5_HAVE_PARALLEL
+H5_DLL herr_t H5C_apply_candidate_list(H5F_t *f, hid_t primary_dxpl_id,
+ hid_t secondary_dxpl_id, H5C_t *cache_ptr, int num_candidates,
+ haddr_t *candidates_list_ptr, int mpi_rank, int mpi_size);
+H5_DLL herr_t H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr);
+H5_DLL herr_t H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr);
+#endif /* H5_HAVE_PARALLEL */
#ifndef NDEBUG /* debugging functions */
-
H5_DLL herr_t H5C_get_entry_ptr_from_addr(const H5F_t *f, haddr_t addr,
- void ** entry_ptr_ptr);
-
-H5_DLL herr_t H5C_verify_entry_type(const H5F_t * f, haddr_t addr,
- const H5C_class_t * expected_type,
- hbool_t * in_cache_ptr,
- hbool_t * type_ok_ptr);
-
+ void **entry_ptr_ptr);
+H5_DLL herr_t H5C_verify_entry_type(const H5F_t *f, haddr_t addr,
+ const H5C_class_t *expected_type, hbool_t *in_cache_ptr,
+ hbool_t *type_ok_ptr);
#endif /* NDEBUG */
#endif /* !_H5Cprivate_H */
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index e22eb3a..af5123c 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -722,7 +722,6 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
unsigned f_ndims; /* The number of dimensions of the file's dataspace */
int sm_ndims; /* The number of dimensions of the memory buffer's dataspace (signed) */
H5SL_node_t *curr_node; /* Current node in skip list */
- H5S_sel_type fsel_type; /* Selection type on disk */
char bogus; /* "bogus" buffer to pass to selection iterator */
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
@@ -836,13 +835,13 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
fm->use_single = FALSE;
/* Get type of selection on disk & in memory */
- if((fsel_type = H5S_GET_SELECT_TYPE(file_space)) < H5S_SEL_NONE)
+ if((fm->fsel_type = H5S_GET_SELECT_TYPE(file_space)) < H5S_SEL_NONE)
HGOTO_ERROR(H5E_DATASET, H5E_BADSELECT, FAIL, "unable to get type of selection")
if((fm->msel_type = H5S_GET_SELECT_TYPE(mem_space)) < H5S_SEL_NONE)
HGOTO_ERROR(H5E_DATASET, H5E_BADSELECT, FAIL, "unable to get type of selection")
/* If the selection is NONE or POINTS, set the flag to FALSE */
- if(fsel_type == H5S_SEL_POINTS || fsel_type == H5S_SEL_NONE)
+ if(fm->fsel_type == H5S_SEL_POINTS || fm->fsel_type == H5S_SEL_NONE)
sel_hyper_flag = FALSE;
else
sel_hyper_flag = TRUE;
@@ -2014,7 +2013,8 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
/* Determine if we will access all the data in the chunk */
if(dst_accessed_bytes != ctg_store.contig.dset_size ||
- (chunk_info->chunk_points * type_info->src_type_size) != ctg_store.contig.dset_size)
+ (chunk_info->chunk_points * type_info->src_type_size) != ctg_store.contig.dset_size ||
+ fm->fsel_type == H5S_SEL_POINTS)
entire_chunk = FALSE;
/* Set chunk's [scaled] coordinates */
diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h
index 815dae4..1476229 100644
--- a/src/H5Dpkg.h
+++ b/src/H5Dpkg.h
@@ -337,6 +337,7 @@ typedef struct H5D_chunk_map_t {
H5S_sel_iter_t mem_iter; /* Iterator for elements in memory selection */
unsigned m_ndims; /* Number of dimensions for memory dataspace */
H5S_sel_type msel_type; /* Selection type in memory */
+ H5S_sel_type fsel_type; /* Selection type in file */
H5SL_t *sel_chunks; /* Skip list containing information for each chunk selected */
diff --git a/src/H5Fint.c b/src/H5Fint.c
index e012ba6..df8e886 100644
--- a/src/H5Fint.c
+++ b/src/H5Fint.c
@@ -857,14 +857,6 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
/* Push error, but keep going*/
HDONE_ERROR(H5E_FILE, H5E_CANTDEC, FAIL, "can't close property list")
- /* Only truncate the file on an orderly close, with write-access */
- if(f->closing && (H5F_ACC_RDWR & H5F_INTENT(f))) {
- /* Truncate the file to the current allocated size */
- if(H5FD_truncate(f->shared->lf, dxpl_id, (unsigned)TRUE) < 0)
- /* Push error, but keep going*/
- HDONE_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "low level truncate failed")
- } /* end if */
-
/* Close the file */
if(H5FD_close(f->shared->lf) < 0)
/* Push error, but keep going*/
@@ -1182,6 +1174,15 @@ H5F_flush(H5F_t *f, hid_t dxpl_id, hbool_t closing)
/* Push error, but keep going*/
HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush metadata cache")
+ /* Truncate the file to the current allocated size */
+ if(H5FD_truncate(f->shared->lf, dxpl_id, closing) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "low level truncate failed")
+
+ /* Flush the entire metadata cache again since the EOA could have changed in the truncate call. */
+ if(H5AC_flush(f, dxpl_id) < 0)
+ /* Push error, but keep going*/
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush metadata cache")
+
/* Set up I/O info for operation */
fio_info.f = f;
if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))