Diffstat (limited to 'src')
-rw-r--r--  src/H5AC.c            3105
-rw-r--r--  src/H5ACpkg.h         25
-rw-r--r--  src/H5ACprivate.h     129
-rw-r--r--  src/H5B.c             34
-rw-r--r--  src/H5B2.c            23
-rw-r--r--  src/H5B2cache.c       967
-rw-r--r--  src/H5B2dbg.c         13
-rw-r--r--  src/H5B2int.c         94
-rw-r--r--  src/H5B2pkg.h         11
-rw-r--r--  src/H5B2test.c        4
-rw-r--r--  src/H5Bcache.c        363
-rw-r--r--  src/H5Bdbg.c          6
-rw-r--r--  src/H5C.c             3288
-rw-r--r--  src/H5Cpkg.h          2268
-rw-r--r--  src/H5Cprivate.h      1673
-rw-r--r--  src/H5Dchunk.c        8
-rw-r--r--  src/H5Dlayout.c       2
-rw-r--r--  src/H5Dpkg.h          1
-rw-r--r--  src/H5EA.c            31
-rw-r--r--  src/H5EAcache.c       2035
-rw-r--r--  src/H5EAdbg.c         14
-rw-r--r--  src/H5EAdblkpage.c    8
-rw-r--r--  src/H5EAdblock.c      10
-rw-r--r--  src/H5EAhdr.c         13
-rw-r--r--  src/H5EAiblock.c      10
-rw-r--r--  src/H5EApkg.h         23
-rw-r--r--  src/H5EAsblock.c      11
-rw-r--r--  src/H5FA.c            16
-rw-r--r--  src/H5FAcache.c       1099
-rw-r--r--  src/H5FAdbg.c         8
-rw-r--r--  src/H5FAdblkpage.c    8
-rw-r--r--  src/H5FAdblock.c      10
-rw-r--r--  src/H5FAhdr.c         13
-rw-r--r--  src/H5FApkg.h         15
-rw-r--r--  src/H5FDfamily.c      2
-rw-r--r--  src/H5FDpublic.h      6
-rw-r--r--  src/H5FS.c            23
-rw-r--r--  src/H5FScache.c       1397
-rw-r--r--  src/H5FSdbg.c         4
-rw-r--r--  src/H5FSpkg.h         4
-rw-r--r--  src/H5FSsection.c     33
-rw-r--r--  src/H5Fint.c          25
-rw-r--r--  src/H5Fpkg.h          55
-rw-r--r--  src/H5Fsuper.c        463
-rw-r--r--  src/H5Fsuper_cache.c  1439
-rw-r--r--  src/H5Gcache.c        400
-rw-r--r--  src/H5Gent.c          2
-rw-r--r--  src/H5Gnode.c         24
-rw-r--r--  src/H5Gstab.c         22
-rw-r--r--  src/H5Gtest.c         6
-rw-r--r--  src/H5HF.c            8
-rw-r--r--  src/H5HFcache.c       2422
-rw-r--r--  src/H5HFdbg.c         12
-rw-r--r--  src/H5HFdblock.c      27
-rw-r--r--  src/H5HFhdr.c         11
-rw-r--r--  src/H5HFiblock.c      38
-rw-r--r--  src/H5HFiter.c        2
-rw-r--r--  src/H5HFman.c         20
-rw-r--r--  src/H5HFpkg.h         35
-rw-r--r--  src/H5HFsection.c     12
-rw-r--r--  src/H5HG.c            19
-rw-r--r--  src/H5HGcache.c       498
-rw-r--r--  src/H5HGdbg.c         2
-rw-r--r--  src/H5HGpkg.h         2
-rw-r--r--  src/H5HL.c            25
-rw-r--r--  src/H5HLcache.c       778
-rw-r--r--  src/H5HLdbg.c         2
-rw-r--r--  src/H5HLpkg.h         4
-rw-r--r--  src/H5HLprivate.h     2
-rw-r--r--  src/H5O.c             39
-rw-r--r--  src/H5Oattribute.c    10
-rw-r--r--  src/H5Ocache.c        1029
-rw-r--r--  src/H5Ochunk.c        6
-rw-r--r--  src/H5Ocopy.c         2
-rw-r--r--  src/H5Odbg.c          2
-rw-r--r--  src/H5Oefl.c          6
-rw-r--r--  src/H5Omessage.c      14
-rw-r--r--  src/H5Oprivate.h      4
-rw-r--r--  src/H5Otest.c         14
-rw-r--r--  src/H5Plapl.c         50
-rw-r--r--  src/H5SM.c            30
-rw-r--r--  src/H5SMcache.c       781
-rw-r--r--  src/H5SMtest.c        2
83 files changed, 13789 insertions, 11362 deletions
diff --git a/src/H5AC.c b/src/H5AC.c
index c5466be..c6b0b47 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -27,6 +27,10 @@
*-------------------------------------------------------------------------
*/
+/****************/
+/* Module Setup */
+/****************/
+
#define H5AC_PACKAGE /*suppress error about including H5ACpkg */
#define H5C_PACKAGE /*suppress error about including H5Cpkg */
#define H5F_PACKAGE /*suppress error about including H5Fpkg */
@@ -34,10 +38,9 @@
/* Interface initialization */
#define H5_INTERFACE_INIT_FUNC H5AC_init_interface
-#ifdef H5_HAVE_PARALLEL
-#include <mpi.h>
-#endif /* H5_HAVE_PARALLEL */
-
+/***********/
+/* Headers */
+/***********/
#include "H5private.h" /* Generic Functions */
#include "H5ACpkg.h" /* Metadata cache */
#include "H5Cpkg.h" /* Cache */
@@ -51,13 +54,16 @@
#include "H5Pprivate.h" /* Property lists */
-#ifdef H5_HAVE_PARALLEL
+/****************/
+/* Local Macros */
+/****************/
-/* Declare a free list to manage the H5AC_aux_t struct */
-H5FL_DEFINE_STATIC(H5AC_aux_t);
-#endif /* H5_HAVE_PARALLEL */
+/******************/
+/* Local Typedefs */
+/******************/
+#ifdef H5_HAVE_PARALLEL
/****************************************************************************
*
* structure H5AC_slist_entry_t
@@ -69,143 +75,135 @@ H5FL_DEFINE_STATIC(H5AC_aux_t);
* allocated structure to store these offsets in. This structure serves
* that purpose. Its fields are as follows:
*
- * magic: Unsigned 32 bit integer always set to
- * H5AC__H5AC_SLIST_ENTRY_T_MAGIC. This field is used to
- * validate pointers to instances of H5AC_slist_entry_t.
- *
* addr: file offset of a metadata entry. Entries are added to this
* list (if they aren't there already) when they are marked
* dirty in an unprotect, inserted, or moved. They are
* removed when they appear in a clean entries broadcast.
*
****************************************************************************/
-
-#ifdef H5_HAVE_PARALLEL
-
-#define H5AC__H5AC_SLIST_ENTRY_T_MAGIC 0x00D0A02
-
typedef struct H5AC_slist_entry_t
{
- uint32_t magic;
-
haddr_t addr;
} H5AC_slist_entry_t;
-/* Declare a free list to manage the H5AC_slist_entry_t struct */
-H5FL_DEFINE_STATIC(H5AC_slist_entry_t);
+/* User data for address list building callbacks */
+typedef struct H5AC_addr_list_ud_t
+{
+ H5AC_aux_t * aux_ptr; /* 'Auxiliary' parallel cache info */
+ haddr_t * addr_buf_ptr; /* Array to store addresses */
+ int i; /* Counter for position in array */
+} H5AC_addr_list_ud_t;
+#endif /* H5_HAVE_PARALLEL */
+
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static herr_t H5AC__check_if_write_permitted(const H5F_t *f,
+ hbool_t *write_permitted_ptr);
+static herr_t H5AC__ext_config_2_int_config(H5AC_cache_config_t *ext_conf_ptr,
+ H5C_auto_size_ctl_t *int_conf_ptr);
+#ifdef H5_HAVE_PARALLEL
+static herr_t H5AC__broadcast_candidate_list(H5AC_t *cache_ptr,
+ int *num_entries_ptr, haddr_t **haddr_buf_ptr_ptr);
+static herr_t H5AC__broadcast_clean_list(H5AC_t *cache_ptr);
+static herr_t H5AC__construct_candidate_list(H5AC_t *cache_ptr,
+ H5AC_aux_t *aux_ptr, int sync_point_op);
+static herr_t H5AC__copy_candidate_list_to_buffer(const H5AC_t *cache_ptr,
+ int *num_entries_ptr, haddr_t **haddr_buf_ptr_ptr);
+static herr_t H5AC__flush_entries(H5F_t *f, hid_t dxpl_id);
+static herr_t H5AC__log_deleted_entry(const H5AC_info_t *entry_ptr);
+static herr_t H5AC__log_dirtied_entry(const H5AC_info_t *entry_ptr);
+static herr_t H5AC__log_flushed_entry(H5C_t *cache_ptr, haddr_t addr,
+ hbool_t was_dirty, unsigned flags);
+static herr_t H5AC__log_inserted_entry(const H5AC_info_t *entry_ptr);
+static herr_t H5AC__log_moved_entry(const H5F_t *f, haddr_t old_addr,
+ haddr_t new_addr);
+static herr_t H5AC__propagate_and_apply_candidate_list(H5F_t *f, hid_t dxpl_id);
+static herr_t H5AC__propagate_flushed_and_still_clean_entries_list(H5F_t *f,
+ hid_t dxpl_id);
+static herr_t H5AC__receive_haddr_list(MPI_Comm mpi_comm, int *num_entries_ptr,
+ haddr_t **haddr_buf_ptr_ptr);
+static herr_t H5AC__receive_candidate_list(const H5AC_t *cache_ptr,
+ int *num_entries_ptr, haddr_t **haddr_buf_ptr_ptr);
+static herr_t H5AC__receive_and_apply_clean_list(H5F_t *f, hid_t dxpl_id);
+static herr_t H5AC__tidy_cache_0_lists(H5AC_t *cache_ptr, int num_candidates,
+ haddr_t *candidates_list_ptr);
+static herr_t H5AC__rsp__dist_md_write__flush(H5F_t *f, hid_t dxpl_id);
+static herr_t H5AC__rsp__dist_md_write__flush_to_min_clean(H5F_t *f, hid_t dxpl_id);
+static herr_t H5AC__rsp__p0_only__flush(H5F_t *f, hid_t dxpl_id);
+static herr_t H5AC__rsp__p0_only__flush_to_min_clean(H5F_t *f, hid_t dxpl_id);
+static herr_t H5AC__run_sync_point(H5F_t *f, hid_t dxpl_id, int sync_point_op);
#endif /* H5_HAVE_PARALLEL */
-/*
- * Private file-scope variables.
- */
+/*********************/
+/* Package Variables */
+/*********************/
+
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
/* Default dataset transfer property list for metadata I/O calls */
/* (Collective set, "block before metadata write" set and "library internal" set) */
/* (Global variable definition, declaration is in H5ACprivate.h also) */
-hid_t H5AC_dxpl_id=(-1);
+hid_t H5AC_dxpl_id = (-1);
/* Dataset transfer property list for independent metadata I/O calls */
/* (just "library internal" set - i.e. independent transfer mode) */
/* (Global variable definition, declaration is in H5ACprivate.h also) */
H5P_genplist_t *H5AC_ind_dxpl_g = NULL;
-hid_t H5AC_ind_dxpl_id=(-1);
-
-
-/*
- * Private file-scope function declarations:
- */
+hid_t H5AC_ind_dxpl_id = (-1);
-static herr_t H5AC_check_if_write_permitted(const H5F_t *f,
- hid_t dxpl_id,
- hbool_t * write_permitted_ptr);
-static herr_t H5AC_ext_config_2_int_config(H5AC_cache_config_t * ext_conf_ptr,
- H5C_auto_size_ctl_t * int_conf_ptr);
+/*******************/
+/* Local Variables */
+/*******************/
#ifdef H5_HAVE_PARALLEL
-static herr_t H5AC_broadcast_candidate_list(H5AC_t * cache_ptr,
- int * num_entries_ptr,
- haddr_t ** haddr_buf_ptr_ptr);
-
-static herr_t H5AC_broadcast_clean_list(H5AC_t * cache_ptr);
-
-static herr_t H5AC_construct_candidate_list(H5AC_t * cache_ptr,
- H5AC_aux_t * aux_ptr,
- int sync_point_op);
-
-static herr_t H5AC_copy_candidate_list_to_buffer(H5AC_t * cache_ptr,
- int * num_entries_ptr,
- haddr_t ** haddr_buf_ptr_ptr,
- size_t * MPI_Offset_buf_size_ptr,
- MPI_Offset ** MPI_Offset_buf_ptr_ptr);
-
-static herr_t H5AC_flush_entries(H5F_t *f);
-
-static herr_t H5AC_log_deleted_entry(H5AC_t * cache_ptr,
- H5AC_info_t * entry_ptr,
- haddr_t addr,
- unsigned int flags);
-
-static herr_t H5AC_log_dirtied_entry(const H5AC_info_t * entry_ptr,
- haddr_t addr);
-
-static herr_t H5AC_log_flushed_entry(H5C_t * cache_ptr,
- haddr_t addr,
- hbool_t was_dirty,
- unsigned flags,
- int type_id);
-
-static herr_t H5AC_log_moved_entry(const H5F_t * f,
- haddr_t old_addr,
- haddr_t new_addr);
-
-static herr_t H5AC_log_inserted_entry(H5AC_t * cache_ptr,
- H5AC_info_t * entry_ptr);
-
-static herr_t H5AC_propagate_and_apply_candidate_list(H5F_t * f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr);
-
-static herr_t H5AC_propagate_flushed_and_still_clean_entries_list(H5F_t * f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr);
-
-static herr_t H5AC_receive_candidate_list(H5AC_t * cache_ptr,
- int * num_entries_ptr,
- haddr_t ** haddr_buf_ptr_ptr);
-
-static herr_t H5AC_receive_and_apply_clean_list(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- H5AC_t * cache_ptr);
-
-static herr_t H5AC_tidy_cache_0_lists(H5AC_t * cache_ptr,
- int num_candidates,
- haddr_t * candidates_list_ptr);
-
-herr_t H5AC_rsp__dist_md_write__flush(H5F_t *f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr);
-
-herr_t H5AC_rsp__dist_md_write__flush_to_min_clean(H5F_t *f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr);
-
-herr_t H5AC_rsp__p0_only__flush(H5F_t *f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr);
+/* Declare a free list to manage the H5AC_aux_t struct */
+H5FL_DEFINE_STATIC(H5AC_aux_t);
-herr_t H5AC_rsp__p0_only__flush_to_min_clean(H5F_t *f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr);
+/* Declare a free list to manage the H5AC_slist_entry_t struct */
+H5FL_DEFINE_STATIC(H5AC_slist_entry_t);
+#endif /* H5_HAVE_PARALLEL */
-static herr_t H5AC_run_sync_point(H5F_t *f,
- hid_t dxpl_id,
- int sync_point_op);
+static const char *H5AC_entry_type_names[H5AC_NTYPES] =
+{
+ "B-tree nodes",
+ "symbol table nodes",
+ "local heap prefixes",
+ "local heap data blocks",
+ "global heaps",
+ "object headers",
+ "object header chunks",
+ "v2 B-tree headers",
+ "v2 B-tree internal nodes",
+ "v2 B-tree leaf nodes",
+ "fractal heap headers",
+ "fractal heap direct blocks",
+ "fractal heap indirect blocks",
+ "free space headers",
+ "free space sections",
+ "shared OH message master table",
+ "shared OH message index",
+ "extensible array headers",
+ "extensible array index blocks",
+ "extensible array super blocks",
+ "extensible array data blocks",
+ "extensible array data block pages",
+ "fixed array headers",
+ "fixed array data block",
+ "fixed array data block pages",
+ "superblock",
+ "driver info",
+ "test entry" /* for testing only -- not used for actual files */
+};
-#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
@@ -232,7 +230,7 @@ H5AC_init(void)
done:
FUNC_LEAVE_NOAPI(ret_value)
-}
+} /* end H5AC_init() */
/*-------------------------------------------------------------------------
@@ -273,8 +271,8 @@ H5AC_init_interface(void)
/* Insert 'collective metadata write' property */
coll_meta_write = 1;
if(H5P_insert(xfer_plist, H5AC_COLLECTIVE_META_WRITE_NAME, H5AC_COLLECTIVE_META_WRITE_SIZE, &coll_meta_write,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't insert metadata cache dxpl property")
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "can't insert metadata cache dxpl property")
/* Get an ID for the independent H5AC dxpl */
if((H5AC_ind_dxpl_id = H5P_create_id(H5P_CLS_DATASET_XFER_g, FALSE)) < 0)
@@ -287,8 +285,8 @@ H5AC_init_interface(void)
/* Insert 'collective metadata write' property */
coll_meta_write = 0;
if(H5P_insert(H5AC_ind_dxpl_g, H5AC_COLLECTIVE_META_WRITE_NAME, H5AC_COLLECTIVE_META_WRITE_SIZE, &coll_meta_write,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't insert metadata cache dxpl property")
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "can't insert metadata cache dxpl property")
#else /* H5_HAVE_PARALLEL */
/* Sanity check */
HDassert(H5P_LST_DATASET_XFER_ID_g!=(-1));
@@ -297,7 +295,7 @@ H5AC_init_interface(void)
H5AC_ind_dxpl_id = H5P_DATASET_XFER_DEFAULT;
/* Get the property list objects for the IDs */
- if (NULL == (H5AC_ind_dxpl_g = (H5P_genplist_t *)H5I_object(H5AC_ind_dxpl_id)))
+ if(NULL == (H5AC_ind_dxpl_g = (H5P_genplist_t *)H5I_object(H5AC_ind_dxpl_id)))
HGOTO_ERROR(H5E_CACHE, H5E_BADATOM, FAIL, "can't get property list object")
#endif /* H5_HAVE_PARALLEL */
@@ -328,15 +326,14 @@ H5AC_term_interface(void)
FUNC_ENTER_NOAPI_NOINIT_NOERR
- if (H5_interface_initialize_g) {
+ if(H5_interface_initialize_g) {
#ifdef H5_HAVE_PARALLEL
if(H5AC_dxpl_id > 0 || H5AC_ind_dxpl_id > 0) {
/* Indicate more work to do */
n = 1; /* H5I */
/* Close H5AC dxpl */
- if(H5I_dec_ref(H5AC_dxpl_id) < 0 ||
- H5I_dec_ref(H5AC_ind_dxpl_id) < 0)
+ if(H5I_dec_ref(H5AC_dxpl_id) < 0 || H5I_dec_ref(H5AC_ind_dxpl_id) < 0)
H5E_clear_stack(NULL); /*ignore error*/
else {
/* Reset static IDs */
@@ -347,50 +344,22 @@ H5AC_term_interface(void)
H5_interface_initialize_g = 0;
} /* end else */
} /* end if */
- else
-#else /* H5_HAVE_PARALLEL */
+ else {
+#endif /* H5_HAVE_PARALLEL */
/* Reset static IDs */
- H5AC_dxpl_id=(-1);
- H5AC_ind_dxpl_id=(-1);
+ H5AC_dxpl_id = (-1);
+ H5AC_ind_dxpl_id = (-1);
+#ifdef H5_HAVE_PARALLEL
+ } /* end else */
#endif /* H5_HAVE_PARALLEL */
- /* Reset interface initialization flag */
- H5_interface_initialize_g = 0;
+
+ /* Reset interface initialization flag */
+ H5_interface_initialize_g = 0;
} /* end if */
FUNC_LEAVE_NOAPI(n)
} /* end H5AC_term_interface() */
-static const char * H5AC_entry_type_names[H5AC_NTYPES] =
-{
- "B-tree nodes",
- "symbol table nodes",
- "local heap prefixes",
- "local heap data blocks",
- "global heaps",
- "object headers",
- "object header chunks",
- "v2 B-tree headers",
- "v2 B-tree internal nodes",
- "v2 B-tree leaf nodes",
- "fractal heap headers",
- "fractal heap direct blocks",
- "fractal heap indirect blocks",
- "free space headers",
- "free space sections",
- "shared OH message master table",
- "shared OH message index",
- "extensible array headers",
- "extensible array index blocks",
- "extensible array super blocks",
- "extensible array data blocks",
- "extensible array data block pages",
- "fixed array headers",
- "fixed array data block",
- "fixed array data block pages",
- "superblock",
- "test entry" /* for testing only -- not used for actual files */
-};
-
/*-------------------------------------------------------------------------
* Function: H5AC_create
@@ -411,8 +380,7 @@ static const char * H5AC_entry_type_names[H5AC_NTYPES] =
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_create(const H5F_t *f,
- H5AC_cache_config_t *config_ptr)
+H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr)
{
#ifdef H5_HAVE_PARALLEL
char prefix[H5C__PREFIX_LEN] = "";
@@ -422,6 +390,7 @@ H5AC_create(const H5F_t *f,
FUNC_ENTER_NOAPI(FAIL)
+ /* Check arguments */
HDassert(f);
HDassert(NULL == f->shared->cache);
HDassert(config_ptr != NULL) ;
@@ -429,7 +398,7 @@ H5AC_create(const H5F_t *f,
HDcompile_assert(H5C__MAX_NUM_TYPE_IDS == H5AC_NTYPES);
if(H5AC_validate_config(config_ptr) < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Bad cache configuration")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad cache configuration")
#ifdef H5_HAVE_PARALLEL
if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) {
@@ -467,11 +436,8 @@ H5AC_create(const H5F_t *f,
aux_ptr->move_dirty_bytes_updates = 0;
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
aux_ptr->d_slist_ptr = NULL;
- aux_ptr->d_slist_len = 0;
aux_ptr->c_slist_ptr = NULL;
- aux_ptr->c_slist_len = 0;
aux_ptr->candidate_slist_ptr = NULL;
- aux_ptr->candidate_slist_len = 0;
aux_ptr->write_done = NULL;
aux_ptr->sync_point_done = NULL;
@@ -492,66 +458,50 @@ H5AC_create(const H5F_t *f,
if(NULL == (aux_ptr->candidate_slist_ptr = H5SL_create(H5SL_TYPE_HADDR, NULL)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create candidate entry list.")
- if(aux_ptr != NULL) {
- if(aux_ptr->mpi_rank == 0) {
+ if(aux_ptr != NULL)
+ if(aux_ptr->mpi_rank == 0)
f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE,
- H5AC__DEFAULT_MIN_CLEAN_SIZE,
- (H5AC_NTYPES - 1),
- (const char **)H5AC_entry_type_names,
- H5AC_check_if_write_permitted,
- TRUE,
- H5AC_log_flushed_entry,
- (void *)aux_ptr);
- } else {
+ H5AC__DEFAULT_MIN_CLEAN_SIZE, (H5AC_NTYPES - 1),
+ (const char **)H5AC_entry_type_names,
+ H5AC__check_if_write_permitted, TRUE, H5AC__log_flushed_entry,
+ (void *)aux_ptr);
+ else
f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE,
- H5AC__DEFAULT_MIN_CLEAN_SIZE,
- (H5AC_NTYPES - 1),
- (const char **)H5AC_entry_type_names,
- H5AC_check_if_write_permitted,
- TRUE,
- NULL,
- (void *)aux_ptr);
- }
- } else {
+ H5AC__DEFAULT_MIN_CLEAN_SIZE, (H5AC_NTYPES - 1),
+ (const char **)H5AC_entry_type_names,
+ H5AC__check_if_write_permitted, TRUE, NULL,
+ (void *)aux_ptr);
+ else
f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE,
- H5AC__DEFAULT_MIN_CLEAN_SIZE,
- (H5AC_NTYPES - 1),
- (const char **)H5AC_entry_type_names,
- H5AC_check_if_write_permitted,
- TRUE,
- NULL,
- NULL);
- }
- } else {
+ H5AC__DEFAULT_MIN_CLEAN_SIZE, (H5AC_NTYPES - 1),
+ (const char **)H5AC_entry_type_names,
+ H5AC__check_if_write_permitted, TRUE, NULL, NULL);
+ } /* end if */
+ else {
#endif /* H5_HAVE_PARALLEL */
/* The default max cache size and min clean size will frequently be
* overwritten shortly by the subsequent set resize config call.
* -- JRM
*/
f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE,
- H5AC__DEFAULT_MIN_CLEAN_SIZE,
- (H5AC_NTYPES - 1),
- (const char **)H5AC_entry_type_names,
- H5AC_check_if_write_permitted,
- TRUE,
- NULL,
- NULL);
+ H5AC__DEFAULT_MIN_CLEAN_SIZE, (H5AC_NTYPES - 1),
+ (const char **)H5AC_entry_type_names,
+ H5AC__check_if_write_permitted, TRUE, NULL, NULL);
#ifdef H5_HAVE_PARALLEL
- }
+ } /* end else */
#endif /* H5_HAVE_PARALLEL */
if(NULL == f->shared->cache)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed")
#ifdef H5_HAVE_PARALLEL
- if(aux_ptr != NULL) {
+ if(aux_ptr != NULL)
if(H5C_set_prefix(f->shared->cache, prefix) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "H5C_set_prefix() failed")
- } /* end if */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "H5C_set_prefix() failed")
#endif /* H5_HAVE_PARALLEL */
if(H5AC_set_cache_auto_resize_config(f->shared->cache, config_ptr) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "auto resize configuration failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "auto resize configuration failed")
done:
#ifdef H5_HAVE_PARALLEL
@@ -560,13 +510,10 @@ done:
if(aux_ptr != NULL) {
if(aux_ptr->d_slist_ptr != NULL)
H5SL_close(aux_ptr->d_slist_ptr);
-
if(aux_ptr->c_slist_ptr != NULL)
H5SL_close(aux_ptr->c_slist_ptr);
-
if(aux_ptr->candidate_slist_ptr != NULL)
H5SL_close(aux_ptr->candidate_slist_ptr);
-
aux_ptr->magic = 0;
aux_ptr = H5FL_FREE(H5AC_aux_t, aux_ptr);
} /* end if */
@@ -623,12 +570,12 @@ H5AC_dest(H5F_t *f, hid_t dxpl_id)
HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
/* Attempt to flush all entries from rank 0 & Bcast clean list to other ranks */
- if(H5AC_flush_entries(f) < 0)
+ if(H5AC__flush_entries(f, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush.")
#endif /* H5_HAVE_PARALLEL */
/* Destroy the cache */
- if(H5C_dest(f, dxpl_id, H5AC_dxpl_id) < 0)
+ if(H5C_dest(f, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "can't destroy cache")
f->shared->cache = NULL;
@@ -665,13 +612,9 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_expunge_entry(H5F_t *f,
- hid_t dxpl_id,
- const H5AC_class_t *type,
- haddr_t addr,
- unsigned flags)
+H5AC_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type,
+ haddr_t addr, unsigned flags)
{
- herr_t result;
#if H5AC__TRACE_FILE_ENABLED
char trace[128] = "";
FILE * trace_file_ptr = NULL;
@@ -680,12 +623,12 @@ H5AC_expunge_entry(H5F_t *f,
FUNC_ENTER_NOAPI(FAIL)
+ /* Sanity checks */
HDassert(f);
HDassert(f->shared);
HDassert(f->shared->cache);
HDassert(type);
- HDassert(type->clear);
- HDassert(type->dest);
+ HDassert(type->serialize);
HDassert(H5F_addr_defined(addr));
#if H5AC__TRACE_FILE_ENABLED
@@ -696,41 +639,21 @@ H5AC_expunge_entry(H5F_t *f,
* necessary in the trace file. Write the return value to catch occult
* errors.
*/
- if ( ( cache_ptr != NULL ) &&
- ( H5C_get_trace_file_ptr(cache_ptr, &trace_file_ptr) >= 0 ) &&
- ( trace_file_ptr != NULL ) ) {
-
- sprintf(trace, "H5AC_expunge_entry 0x%lx %d",
- (unsigned long)addr,
- (int)(type->id));
- }
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr(cache_ptr)))
+ sprintf(trace, "%s 0x%lx %d", FUNC, (unsigned long)addr, (int)(type->id));
}
#endif /* H5AC__TRACE_FILE_ENABLED */
- result = H5C_expunge_entry(f,
- dxpl_id,
- H5AC_dxpl_id,
- type,
- addr,
- flags);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, \
- "H5C_expunge_entry() failed.")
- }
+ if(H5C_expunge_entry(f, dxpl_id, type, addr, flags) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "H5C_expunge_entry() failed.")
done:
-
#if H5AC__TRACE_FILE_ENABLED
- if ( trace_file_ptr != NULL ) {
-
+ if(trace_file_ptr != NULL)
HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
- }
#endif /* H5AC__TRACE_FILE_ENABLED */
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_expunge_entry() */
@@ -765,6 +688,7 @@ H5AC_flush(H5F_t *f, hid_t dxpl_id)
FUNC_ENTER_NOAPI(FAIL)
+ /* Sanity checks */
HDassert(f);
HDassert(f->shared);
HDassert(f->shared->cache);
@@ -773,22 +697,19 @@ H5AC_flush(H5F_t *f, hid_t dxpl_id)
/* For the flush, only the flags are really necessary in the trace file.
* Write the result to catch occult errors.
*/
- if((f != NULL) &&
- (f->shared != NULL) &&
- (f->shared->cache != NULL) &&
- (H5C_get_trace_file_ptr(f->shared->cache, &trace_file_ptr) >= 0) &&
- (trace_file_ptr != NULL))
- sprintf(trace, "H5AC_flush");
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr(cache_ptr)))
+ sprintf(trace, "%s", FUNC);
#endif /* H5AC__TRACE_FILE_ENABLED */
#ifdef H5_HAVE_PARALLEL
/* Attempt to flush all entries from rank 0 & Bcast clean list to other ranks */
- if(H5AC_flush_entries(f) < 0)
+ if(H5AC__flush_entries(f, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush.")
#endif /* H5_HAVE_PARALLEL */
/* Flush the cache */
- if(H5C_flush_cache(f, dxpl_id, H5AC_dxpl_id, H5AC__NO_FLAGS_SET) < 0)
+ /* (Again, in parallel - writes out the superblock) */
+ if(H5C_flush_cache(f, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache.")
done:
@@ -808,12 +729,12 @@ done:
* cache contains an entry at that location. If it does,
* also determine whether the entry is dirty, protected,
* pinned, etc. and return that information to the caller
- * in *status_ptr.
+ * in *status.
*
* If the specified entry doesn't exist, set *status_ptr
* to zero.
*
- * On error, the value of *status_ptr is undefined.
+ * On error, the value of *status is undefined.
*
* Return: Non-negative on success/Negative on failure
*
@@ -823,44 +744,40 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_get_entry_status(const H5F_t *f,
- haddr_t addr,
- unsigned * status_ptr)
+H5AC_get_entry_status(const H5F_t *f, haddr_t addr, unsigned *status)
{
- hbool_t in_cache;
- hbool_t is_dirty;
- hbool_t is_protected;
- hbool_t is_pinned;
- hbool_t is_flush_dep_child;
- hbool_t is_flush_dep_parent;
- size_t entry_size;
- unsigned status = 0;
+ hbool_t in_cache; /* Entry @ addr is in the cache */
+ hbool_t is_dirty; /* Entry @ addr is in the cache and dirty */
+ hbool_t is_protected; /* Entry @ addr is in the cache and protected */
+ hbool_t is_pinned; /* Entry @ addr is in the cache and pinned */
+ hbool_t is_flush_dep_child; /* Entry @ addr is in the cache and is a flush dependency child */
+ hbool_t is_flush_dep_parent; /* Entry @ addr is in the cache and is a flush dependency parent */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- if((f == NULL) || (!H5F_addr_defined(addr)) || (status_ptr == NULL))
+ if((f == NULL) || (!H5F_addr_defined(addr)) || (status == NULL))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad param(s) on entry.")
- if(H5C_get_entry_status(f, addr, &entry_size, &in_cache, &is_dirty,
+ if(H5C_get_entry_status(f, addr, NULL, &in_cache, &is_dirty,
&is_protected, &is_pinned, &is_flush_dep_parent, &is_flush_dep_child) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_entry_status() failed.")
if(in_cache) {
- status |= H5AC_ES__IN_CACHE;
+ *status |= H5AC_ES__IN_CACHE;
if(is_dirty)
- status |= H5AC_ES__IS_DIRTY;
+ *status |= H5AC_ES__IS_DIRTY;
if(is_protected)
- status |= H5AC_ES__IS_PROTECTED;
+ *status |= H5AC_ES__IS_PROTECTED;
if(is_pinned)
- status |= H5AC_ES__IS_PINNED;
+ *status |= H5AC_ES__IS_PINNED;
if(is_flush_dep_parent)
- status |= H5AC_ES__IS_FLUSH_DEP_PARENT;
+ *status |= H5AC_ES__IS_FLUSH_DEP_PARENT;
if(is_flush_dep_child)
- status |= H5AC_ES__IS_FLUSH_DEP_CHILD;
+ *status |= H5AC_ES__IS_FLUSH_DEP_CHILD;
} /* end if */
-
- *status_ptr = status;
+ else
+ *status = 0;
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -895,12 +812,12 @@ H5AC_insert_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t add
FUNC_ENTER_NOAPI(FAIL)
+ /* Sanity checks */
HDassert(f);
HDassert(f->shared);
HDassert(f->shared->cache);
HDassert(type);
- HDassert(type->flush);
- HDassert(type->size);
+ HDassert(type->serialize);
HDassert(H5F_addr_defined(addr));
HDassert(thing);
@@ -916,28 +833,19 @@ H5AC_insert_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t add
* Note that some data is not available right now -- put what we can
* in the trace buffer now, and fill in the rest at the end.
*/
- if ( ( f != NULL ) &&
- ( f->shared != NULL ) &&
- ( f->shared->cache != NULL ) &&
- ( H5C_get_trace_file_ptr(f->shared->cache, &trace_file_ptr) >= 0) &&
- ( trace_file_ptr != NULL ) ) {
-
- sprintf(trace, "H5AC_insert_entry 0x%lx %d 0x%x",
- (unsigned long)addr,
- type->id,
- flags);
- }
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr(cache_ptr)))
+ sprintf(trace, "%s 0x%lx %d 0x%x", FUNC, (unsigned long)addr, type->id,
+ flags);
#endif /* H5AC__TRACE_FILE_ENABLED */
/* Insert entry into metadata cache */
- if(H5C_insert_entry(f, dxpl_id, H5AC_dxpl_id, type, addr, thing, flags) < 0)
+ if(H5C_insert_entry(f, dxpl_id, type, addr, thing, flags) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C_insert_entry() failed")
#if H5AC__TRACE_FILE_ENABLED
- if(trace_file_ptr != NULL) {
+ if(trace_file_ptr != NULL)
/* make note of the entry size */
trace_entry_size = ((H5C_cache_entry_t *)thing)->size;
- }
#endif /* H5AC__TRACE_FILE_ENABLED */
#ifdef H5_HAVE_PARALLEL
@@ -946,12 +854,12 @@ H5AC_insert_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t add
if(NULL != (aux_ptr = (H5AC_aux_t *)f->shared->cache->aux_ptr)) {
/* Log the new entry */
- if(H5AC_log_inserted_entry(f->shared->cache, (H5AC_info_t *)thing) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5AC_log_inserted_entry() failed")
+ if(H5AC__log_inserted_entry((H5AC_info_t *)thing) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5AC__log_inserted_entry() failed")
/* Check if we should try to flush */
if(aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold)
- if(H5AC_run_sync_point(f, H5AC_dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
+ if(H5AC__run_sync_point(f, H5AC_dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point.")
} /* end if */
}
@@ -959,11 +867,8 @@ H5AC_insert_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t add
done:
#if H5AC__TRACE_FILE_ENABLED
- if(trace_file_ptr != NULL) {
- HDfprintf(trace_file_ptr, "%s %d %d\n", trace,
- (int)trace_entry_size,
- (int)ret_value);
- }
+ if(trace_file_ptr != NULL)
+ HDfprintf(trace_file_ptr, "%s %d %d\n", trace, (int)trace_entry_size, (int)ret_value);
#endif /* H5AC__TRACE_FILE_ENABLED */
FUNC_LEAVE_NOAPI(ret_value)
@@ -1002,8 +907,7 @@ H5AC_mark_entry_dirty(void *thing)
* is really necessary in the trace file. Write the result to catch
* occult errors.
*/
- if((H5C_get_trace_file_ptr_from_entry((const H5C_cache_entry_t *) thing, &trace_file_ptr) >= 0) &&
- (NULL != trace_file_ptr))
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr_from_entry(thing)))
sprintf(trace, "%s 0x%lx", FUNC,
(unsigned long)(((H5C_cache_entry_t *)thing)->addr));
#endif /* H5AC__TRACE_FILE_ENABLED */
@@ -1017,10 +921,9 @@ H5AC_mark_entry_dirty(void *thing)
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
if((!entry_ptr->is_dirty) && (!entry_ptr->is_protected) &&
- (entry_ptr->is_pinned) && (NULL != cache_ptr->aux_ptr)) {
- if(H5AC_log_dirtied_entry(entry_ptr, entry_ptr->addr) < 0)
+ (entry_ptr->is_pinned) && (NULL != cache_ptr->aux_ptr))
+ if(H5AC__log_dirtied_entry(entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "can't log dirtied entry")
- } /* end if */
}
#endif /* H5_HAVE_PARALLEL */
@@ -1059,12 +962,13 @@ H5AC_move_entry(H5F_t *f, const H5AC_class_t *type, haddr_t old_addr, haddr_t ne
FILE * trace_file_ptr = NULL;
#endif /* H5AC__TRACE_FILE_ENABLED */
#ifdef H5_HAVE_PARALLEL
- H5AC_aux_t * aux_ptr;
+ H5AC_aux_t *aux_ptr;
#endif /* H5_HAVE_PARALLEL */
- herr_t ret_value=SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
+ /* Sanity checks */
HDassert(f);
HDassert(f->shared->cache);
HDassert(type);
@@ -1077,25 +981,16 @@ H5AC_move_entry(H5F_t *f, const H5AC_class_t *type, haddr_t old_addr, haddr_t ne
* necessary in the trace file. Include the type id so we don't have to
* look it up. Also write the result to catch occult errors.
*/
- if ( ( f != NULL ) &&
- ( f->shared != NULL ) &&
- ( f->shared->cache != NULL ) &&
- ( H5C_get_trace_file_ptr(f->shared->cache, &trace_file_ptr) >= 0) &&
- ( trace_file_ptr != NULL ) ) {
-
- sprintf(trace, "H5AC_move_entry 0x%lx 0x%lx %d",
- (unsigned long)old_addr,
- (unsigned long)new_addr,
- (int)(type->id));
- }
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr(cache_ptr)))
+ sprintf(trace, "%s 0x%lx 0x%lx %d", FUNC, (unsigned long)old_addr,
+ (unsigned long)new_addr, (int)(type->id));
#endif /* H5AC__TRACE_FILE_ENABLED */
#ifdef H5_HAVE_PARALLEL
/* Log moving the entry */
- if(NULL != (aux_ptr = (H5AC_aux_t *)f->shared->cache->aux_ptr)) {
- if(H5AC_log_moved_entry(f, old_addr, new_addr) < 0)
+ if(NULL != (aux_ptr = (H5AC_aux_t *)f->shared->cache->aux_ptr))
+ if(H5AC__log_moved_entry(f, old_addr, new_addr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "can't log moved entry")
- } /* end if */
#endif /* H5_HAVE_PARALLEL */
if(H5C_move_entry(f->shared->cache, type, old_addr, new_addr) < 0)
@@ -1103,10 +998,9 @@ H5AC_move_entry(H5F_t *f, const H5AC_class_t *type, haddr_t old_addr, haddr_t ne
#ifdef H5_HAVE_PARALLEL
/* Check if we should try to flush */
- if(NULL != aux_ptr && aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold) {
- if(H5AC_run_sync_point(f, H5AC_dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
+ if(NULL != aux_ptr && aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold)
+ if(H5AC__run_sync_point(f, H5AC_dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point.")
- } /* end if */
#endif /* H5_HAVE_PARALLEL */
done:
@@ -1150,8 +1044,7 @@ H5AC_pin_protected_entry(void *thing)
/* For the pin protected entry call, only the addr is really necessary
* in the trace file. Also write the result to catch occult errors.
*/
- if((H5C_get_trace_file_ptr_from_entry((const H5C_cache_entry_t *)thing, &trace_file_ptr) >= 0) &&
- (NULL != trace_file_ptr))
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr_from_entry(thing)))
sprintf(trace, "%s 0x%lx", FUNC,
(unsigned long)(((H5C_cache_entry_t *)thing)->addr));
#endif /* H5AC__TRACE_FILE_ENABLED */
@@ -1198,10 +1091,8 @@ H5AC_create_flush_dependency(void * parent_thing, void * child_thing)
HDassert(child_thing);
#if H5AC__TRACE_FILE_ENABLED
- if((H5C_get_trace_file_ptr_from_entry((H5C_cache_entry_t *)parent_thing, &trace_file_ptr) >= 0) &&
- (NULL != trace_file_ptr))
- sprintf(trace, "%s %lx %lx",
- FUNC,
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr_from_entry(parent_thing)))
+ sprintf(trace, "%s %lx %lx", FUNC,
(unsigned long)(((H5C_cache_entry_t *)parent_thing)->addr),
(unsigned long)(((H5C_cache_entry_t *)child_thing)->addr));
#endif /* H5AC__TRACE_FILE_ENABLED */
@@ -1244,15 +1135,10 @@ done:
*-------------------------------------------------------------------------
*/
void *
-H5AC_protect(H5F_t *f,
- hid_t dxpl_id,
- const H5AC_class_t *type,
- haddr_t addr,
- void *udata,
- H5AC_protect_t rw)
+H5AC_protect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
+ void *udata, unsigned flags)
{
- unsigned protect_flags = H5C__NO_FLAGS_SET;
- void * thing = (void *)NULL;
+ void * thing; /* Pointer to native data structure for entry */
#if H5AC__TRACE_FILE_ENABLED
char trace[128] = "";
size_t trace_entry_size = 0;
@@ -1262,93 +1148,59 @@ H5AC_protect(H5F_t *f,
FUNC_ENTER_NOAPI(NULL)
- /* check args */
+ /* Sanity check */
HDassert(f);
HDassert(f->shared);
HDassert(f->shared->cache);
HDassert(type);
- HDassert(type->flush);
- HDassert(type->load);
+ HDassert(type->serialize);
HDassert(H5F_addr_defined(addr));
+ /* Check for unexpected flags -- H5C__FLUSH_COLLECTIVELY_FLAG
+ * only permitted in the parallel case.
+ */
+#ifdef H5_HAVE_PARALLEL
+ HDassert(0 == (flags & (unsigned)(~(H5C__READ_ONLY_FLAG | \
+ H5C__FLUSH_LAST_FLAG | \
+ H5C__FLUSH_COLLECTIVELY_FLAG))));
+#else /* H5_HAVE_PARALLEL */
+ HDassert(0 == (flags & (unsigned)(~(H5C__READ_ONLY_FLAG | \
+ H5C__FLUSH_LAST_FLAG))));
+#endif /* H5_HAVE_PARALLEL */
+
/* Check for invalid access request */
- if(0 == (H5F_INTENT(f) & H5F_ACC_RDWR) && rw == H5AC_WRITE)
+ if((0 == (H5F_INTENT(f) & H5F_ACC_RDWR)) && (0 == (flags & H5C__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "no write intent on file")
#if H5AC__TRACE_FILE_ENABLED
- /* For the protect call, only the addr and type id is really necessary
- * in the trace file. Include the size of the entry protected as a
- * sanity check. Also indicate whether the call was successful to
- * catch occult errors.
+ /* For the protect call, only the addr, size, type id, and flags are
+ * necessary in the trace file. Also indicate whether the call was
+ * successful to catch occult errors.
*/
- if ( ( f != NULL ) &&
- ( f->shared != NULL ) &&
- ( f->shared->cache != NULL ) &&
- ( H5C_get_trace_file_ptr(f->shared->cache, &trace_file_ptr) >= 0) &&
- ( trace_file_ptr != NULL ) ) {
-
- const char * rw_string;
-
- if ( rw == H5AC_WRITE ) {
-
- rw_string = "H5AC_WRITE";
-
- } else if ( rw == H5AC_READ ) {
-
- rw_string = "H5AC_READ";
-
- } else {
-
- rw_string = "???";
- }
-
- sprintf(trace, "H5AC_protect 0x%lx %d %s",
- (unsigned long)addr,
- (int)(type->id),
- rw_string);
- }
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr(cache_ptr)))
+ sprintf(trace, "%s 0x%lx %d 0x%x", FUNC, (unsigned long)addr,
+ (int)(type->id), flags);
#endif /* H5AC__TRACE_FILE_ENABLED */
- if ( rw == H5AC_READ )
- protect_flags |= H5C__READ_ONLY_FLAG;
-
- thing = H5C_protect(f,
- dxpl_id,
- H5AC_dxpl_id,
- type,
- addr,
- udata,
- protect_flags);
-
- if ( thing == NULL ) {
-
+ if(NULL == (thing = H5C_protect(f, dxpl_id, type, addr, udata, flags)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C_protect() failed.")
- }
#if H5AC__TRACE_FILE_ENABLED
- if ( trace_file_ptr != NULL ) {
-
- /* make note of the entry size */
+ if(trace_file_ptr != NULL)
+ /* Make note of the entry size */
trace_entry_size = ((H5C_cache_entry_t *)thing)->size;
- }
#endif /* H5AC__TRACE_FILE_ENABLED */
/* Set return value */
ret_value = thing;
done:
-
#if H5AC__TRACE_FILE_ENABLED
- if ( trace_file_ptr != NULL ) {
-
- HDfprintf(trace_file_ptr, "%s %d %d\n", trace,
- (int)trace_entry_size,
- (int)(ret_value != NULL));
- }
+ if(trace_file_ptr != NULL)
+ HDfprintf(trace_file_ptr, "%s %d %d\n", trace, (int)trace_entry_size, (int)(ret_value != NULL));
#endif /* H5AC__TRACE_FILE_ENABLED */
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_protect() */
@@ -1383,8 +1235,7 @@ H5AC_resize_entry(void *thing, size_t new_size)
* really necessary in the trace file. Write the result to catch
* occult errors.
*/
- if((H5C_get_trace_file_ptr_from_entry((H5C_cache_entry_t *)thing, &trace_file_ptr) >= 0) &&
- (NULL != trace_file_ptr))
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr_from_entry(thing)))
sprintf(trace, "%s 0x%lx %d", FUNC,
(unsigned long)(((H5C_cache_entry_t *)thing)->addr),
(int)new_size);
@@ -1401,10 +1252,9 @@ H5AC_resize_entry(void *thing, size_t new_size)
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- if((!entry_ptr->is_dirty) && (NULL != cache_ptr->aux_ptr)) {
- if(H5AC_log_dirtied_entry(entry_ptr, entry_ptr->addr) < 0)
+ if((!entry_ptr->is_dirty) && (NULL != cache_ptr->aux_ptr))
+ if(H5AC__log_dirtied_entry(entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "can't log dirtied entry")
- } /* end if */
}
#endif /* H5_HAVE_PARALLEL */
@@ -1449,8 +1299,7 @@ H5AC_unpin_entry(void *thing)
/* For the unpin entry call, only the addr is really necessary
* in the trace file. Also write the result to catch occult errors.
*/
- if((H5C_get_trace_file_ptr_from_entry((H5C_cache_entry_t *)thing, &trace_file_ptr) >= 0) &&
- (NULL != trace_file_ptr))
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr_from_entry(thing)))
sprintf(trace, "%s 0x%lx", FUNC,
(unsigned long)(((H5C_cache_entry_t *)thing)->addr));
#endif /* H5AC__TRACE_FILE_ENABLED */
@@ -1496,10 +1345,8 @@ H5AC_destroy_flush_dependency(void * parent_thing, void * child_thing)
HDassert(child_thing);
#if H5AC__TRACE_FILE_ENABLED
- if((H5C_get_trace_file_ptr_from_entry((H5C_cache_entry_t *)parent_thing, &trace_file_ptr) >= 0) &&
- (NULL != trace_file_ptr))
- sprintf(trace, "%s %llx %llx",
- FUNC,
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr_from_entry(parent_thing)))
+ sprintf(trace, "%s %llx %llx", FUNC,
(unsigned long long)(((H5C_cache_entry_t *)parent_thing)->addr),
(unsigned long long)(((H5C_cache_entry_t *)child_thing)->addr));
#endif /* H5AC__TRACE_FILE_ENABLED */
@@ -1572,12 +1419,13 @@ H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
FUNC_ENTER_NOAPI(FAIL)
+ /* Sanity checks */
HDassert(f);
HDassert(f->shared);
HDassert(f->shared->cache);
HDassert(type);
- HDassert(type->clear);
- HDassert(type->flush);
+ HDassert(type->deserialize);
+ HDassert(type->image_len);
HDassert(H5F_addr_defined(addr));
HDassert(thing);
HDassert( ((H5AC_info_t *)thing)->addr == addr );
@@ -1588,66 +1436,55 @@ H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
* new size are really necessary in the trace file. Write the return
* value to catch occult errors.
*/
- if ( ( f != NULL ) &&
- ( f->shared != NULL ) &&
- ( f->shared->cache != NULL ) &&
- ( H5C_get_trace_file_ptr(f->shared->cache, &trace_file_ptr) >= 0) &&
- ( trace_file_ptr != NULL ) ) {
-
- sprintf(trace, "H5AC_unprotect 0x%lx %d",
- (unsigned long)addr,
- (int)(type->id));
- }
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr(cache_ptr)))
+ sprintf(trace, "%s 0x%lx %d", FUNC, (unsigned long)addr, (int)(type->id));
#endif /* H5AC__TRACE_FILE_ENABLED */
- dirtied = (hbool_t)( ( (flags & H5AC__DIRTIED_FLAG) == H5AC__DIRTIED_FLAG ) ||
- ( ((H5AC_info_t *)thing)->dirtied ) );
- deleted = (hbool_t)( (flags & H5C__DELETED_FLAG) == H5C__DELETED_FLAG );
+ dirtied = (hbool_t)(((flags & H5AC__DIRTIED_FLAG) == H5AC__DIRTIED_FLAG) ||
+ (((H5AC_info_t *)thing)->dirtied));
+ deleted = (hbool_t)((flags & H5C__DELETED_FLAG) == H5C__DELETED_FLAG);
/* Check if the size changed out from underneath us, if we're not deleting
* the entry.
*/
if(dirtied && !deleted) {
+ hbool_t curr_compressed = FALSE; /* dummy for call */
size_t curr_size = 0;
+ size_t curr_compressed_size = 0; /* dummy for call */
- if((type->size)(f, thing, &curr_size) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGETSIZE, FAIL, "Can't get size of thing")
+ if((type->image_len)(thing, &curr_size, &curr_compressed, &curr_compressed_size) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGETSIZE, FAIL, "Can't get size of thing")
if(((H5AC_info_t *)thing)->size != curr_size)
HGOTO_ERROR(H5E_CACHE, H5E_BADSIZE, FAIL, "size of entry changed")
} /* end if */
#ifdef H5_HAVE_PARALLEL
- if((dirtied) && (((H5AC_info_t *)thing)->is_dirty == FALSE) &&
- (NULL != (aux_ptr = (H5AC_aux_t *)f->shared->cache->aux_ptr))) {
- if(H5AC_log_dirtied_entry((H5AC_info_t *)thing, addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "can't log dirtied entry")
- } /* end if */
+ if(NULL != (aux_ptr = (H5AC_aux_t *)f->shared->cache->aux_ptr)) {
+ if(dirtied && ((H5AC_info_t *)thing)->is_dirty == FALSE)
+ if(H5AC__log_dirtied_entry((H5AC_info_t *)thing) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "can't log dirtied entry")
- if((deleted) &&
- (NULL != (aux_ptr = (H5AC_aux_t *)(f->shared->cache->aux_ptr))) &&
- (aux_ptr->mpi_rank == 0)) {
- if(H5AC_log_deleted_entry(f->shared->cache, (H5AC_info_t *)thing, addr, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "H5AC_log_deleted_entry() failed.")
+ if(deleted && aux_ptr->mpi_rank == 0)
+ if(H5AC__log_deleted_entry((H5AC_info_t *)thing) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "H5AC__log_deleted_entry() failed.")
} /* end if */
#endif /* H5_HAVE_PARALLEL */
- if(H5C_unprotect(f, dxpl_id, H5AC_dxpl_id, type, addr, thing, flags) < 0)
+ if(H5C_unprotect(f, dxpl_id, type, addr, thing, flags) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "H5C_unprotect() failed.")
#ifdef H5_HAVE_PARALLEL
/* Check if we should try to flush */
- if((aux_ptr != NULL) && (aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold)) {
- if(H5AC_run_sync_point(f, H5AC_dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
+ if((aux_ptr != NULL) && (aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold))
+ if(H5AC__run_sync_point(f, H5AC_dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point.")
- } /* end if */
#endif /* H5_HAVE_PARALLEL */
done:
#if H5AC__TRACE_FILE_ENABLED
if(trace_file_ptr != NULL)
- HDfprintf(trace_file_ptr, "%s 0x%x %d\n",
- trace, (unsigned)flags, (int)ret_value);
+ HDfprintf(trace_file_ptr, "%s 0x%x %d\n", trace, (unsigned)flags, (int)ret_value);
#endif /* H5AC__TRACE_FILE_ENABLED */
FUNC_LEAVE_NOAPI(ret_value)
@@ -1678,12 +1515,11 @@ H5AC_set_sync_point_done_callback(H5C_t * cache_ptr,
FUNC_ENTER_NOAPI_NOINIT_NOERR
+ /* Sanity checks */
HDassert(cache_ptr && (cache_ptr->magic == H5C__H5C_T_MAGIC));
-
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
aux_ptr->sync_point_done = sync_point_done;
@@ -1708,17 +1544,15 @@ H5AC_set_sync_point_done_callback(H5C_t * cache_ptr,
*/
#ifdef H5_HAVE_PARALLEL
herr_t
-H5AC_set_write_done_callback(H5C_t * cache_ptr,
- void (* write_done)(void))
+H5AC_set_write_done_callback(H5C_t * cache_ptr, void (* write_done)(void))
{
H5AC_aux_t * aux_ptr;
FUNC_ENTER_NOAPI_NOINIT_NOERR
+ /* Sanity checks */
HDassert(cache_ptr && (cache_ptr->magic == H5C__H5C_T_MAGIC));
-
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
HDassert( aux_ptr != NULL );
HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
@@ -1728,6 +1562,7 @@ H5AC_set_write_done_callback(H5C_t * cache_ptr,
} /* H5AC_set_write_done_callback() */
#endif /* H5_HAVE_PARALLEL */
+#ifndef NDEBUG /* debugging functions */
/*-------------------------------------------------------------------------
* Function: H5AC_stats
@@ -1744,10 +1579,9 @@ H5AC_set_write_done_callback(H5C_t * cache_ptr,
herr_t
H5AC_stats(const H5F_t *f)
{
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+ /* Sanity checks */
HDassert(f);
HDassert(f->shared);
HDassert(f->shared->cache);
@@ -1755,8 +1589,7 @@ H5AC_stats(const H5F_t *f)
/* at present, this can't fail */
(void)H5C_stats(f->shared->cache, H5F_OPEN_NAME(f), FALSE);
-done:
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5AC_stats() */
@@ -1780,18 +1613,18 @@ H5AC_dump_cache(const H5F_t *f)
FUNC_ENTER_NOAPI(FAIL)
+ /* Sanity checks */
HDassert(f);
HDassert(f->shared);
HDassert(f->shared->cache);
- if ( H5C_dump_cache(f->shared->cache, H5F_OPEN_NAME(f)) < 0 ) {
-
+ if(H5C_dump_cache(f->shared->cache, H5F_OPEN_NAME(f)) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_dump_cache() failed.")
- }
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_dump_cache() */
+#endif /* NDEBUG */
/*-------------------------------------------------------------------------
@@ -1807,59 +1640,36 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_get_cache_auto_resize_config(const H5AC_t * cache_ptr,
- H5AC_cache_config_t *config_ptr)
+H5AC_get_cache_auto_resize_config(const H5AC_t *cache_ptr,
+ H5AC_cache_config_t *config_ptr)
{
- herr_t result;
- herr_t ret_value = SUCCEED; /* Return value */
- hbool_t evictions_enabled;
H5C_auto_size_ctl_t internal_config;
+ hbool_t evictions_enabled;
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- if ( ( cache_ptr == NULL )
- ||
+ /* Check args */
+ if((cache_ptr == NULL) ||
#ifdef H5_HAVE_PARALLEL
- ( ( cache_ptr->aux_ptr != NULL )
- &&
- ( ((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic
- !=
- H5AC__H5AC_AUX_T_MAGIC
- )
- )
- ||
+ ((cache_ptr->aux_ptr != NULL) &&
+ (((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic != H5AC__H5AC_AUX_T_MAGIC))
+ ||
#endif /* H5_HAVE_PARALLEL */
- ( config_ptr == NULL )
- ||
- ( config_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION )
- )
- {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Bad cache_ptr or config_ptr on entry.")
-
- }
-
- result = H5C_get_cache_auto_resize_config((const H5C_t *)cache_ptr,
- &internal_config);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_get_cache_auto_resize_config() failed.")
- }
+ (config_ptr == NULL) || (config_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr or config_ptr on entry.")
+ /* Retrieve the configuration */
+ if(H5C_get_cache_auto_resize_config((const H5C_t *)cache_ptr, &internal_config) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_cache_auto_resize_config() failed.")
if(H5C_get_evictions_enabled((const H5C_t *)cache_ptr, &evictions_enabled) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_resize_enabled() failed.")
- if ( internal_config.rpt_fcn == NULL ) {
-
+ /* Set the information to return */
+ if(internal_config.rpt_fcn == NULL)
config_ptr->rpt_fcn_enabled = FALSE;
-
- } else {
-
+ else
config_ptr->rpt_fcn_enabled = TRUE;
- }
-
config_ptr->open_trace_file = FALSE;
config_ptr->close_trace_file = FALSE;
config_ptr->trace_file_name[0] = '\0';
@@ -1883,35 +1693,24 @@ H5AC_get_cache_auto_resize_config(const H5AC_t * cache_ptr,
config_ptr->decrement = internal_config.decrement;
config_ptr->apply_max_decrement = internal_config.apply_max_decrement;
config_ptr->max_decrement = internal_config.max_decrement;
- config_ptr->epochs_before_eviction =
- (int)(internal_config.epochs_before_eviction);
+ config_ptr->epochs_before_eviction = (int)(internal_config.epochs_before_eviction);
config_ptr->apply_empty_reserve = internal_config.apply_empty_reserve;
config_ptr->empty_reserve = internal_config.empty_reserve;
-
#ifdef H5_HAVE_PARALLEL
- if ( cache_ptr->aux_ptr != NULL ) {
-
- config_ptr->dirty_bytes_threshold =
- ((H5AC_aux_t *)(cache_ptr->aux_ptr))->dirty_bytes_threshold;
- config_ptr->metadata_write_strategy =
- ((H5AC_aux_t *)(cache_ptr->aux_ptr))->metadata_write_strategy;
-
- } else {
+ if(cache_ptr->aux_ptr != NULL) {
+ config_ptr->dirty_bytes_threshold = ((H5AC_aux_t *)(cache_ptr->aux_ptr))->dirty_bytes_threshold;
+ config_ptr->metadata_write_strategy = ((H5AC_aux_t *)(cache_ptr->aux_ptr))->metadata_write_strategy;
+ } /* end if */
+ else {
#endif /* H5_HAVE_PARALLEL */
-
- config_ptr->dirty_bytes_threshold =
- H5AC__DEFAULT_DIRTY_BYTES_THRESHOLD;
- config_ptr->metadata_write_strategy =
- H5AC__DEFAULT_METADATA_WRITE_STRATEGY;
-
+ config_ptr->dirty_bytes_threshold = H5AC__DEFAULT_DIRTY_BYTES_THRESHOLD;
+ config_ptr->metadata_write_strategy = H5AC__DEFAULT_METADATA_WRITE_STRATEGY;
#ifdef H5_HAVE_PARALLEL
- }
+ } /* end else */
#endif /* H5_HAVE_PARALLEL */
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_get_cache_auto_resize_config() */
@@ -1928,33 +1727,19 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_get_cache_size(H5AC_t * cache_ptr,
- size_t * max_size_ptr,
- size_t * min_clean_size_ptr,
- size_t * cur_size_ptr,
- int32_t * cur_num_entries_ptr)
+H5AC_get_cache_size(H5AC_t *cache_ptr, size_t *max_size_ptr, size_t *min_clean_size_ptr,
+ size_t *cur_size_ptr, int32_t *cur_num_entries_ptr)
{
- herr_t result;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- result = H5C_get_cache_size((H5C_t *)cache_ptr,
- max_size_ptr,
- min_clean_size_ptr,
- cur_size_ptr,
- cur_num_entries_ptr);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_get_cache_size() failed.")
- }
+ if(H5C_get_cache_size((H5C_t *)cache_ptr, max_size_ptr, min_clean_size_ptr,
+ cur_size_ptr, cur_num_entries_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_cache_size() failed.")
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_get_cache_size() */
@@ -1971,7 +1756,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_get_cache_hit_rate(H5AC_t * cache_ptr, double * hit_rate_ptr)
+H5AC_get_cache_hit_rate(H5AC_t *cache_ptr, double *hit_rate_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -2000,23 +1785,15 @@ done:
herr_t
H5AC_reset_cache_hit_rate_stats(H5AC_t * cache_ptr)
{
- herr_t result;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- result = H5C_reset_cache_hit_rate_stats((H5C_t *)cache_ptr);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_reset_cache_hit_rate_stats() failed.")
- }
+ if(H5C_reset_cache_hit_rate_stats((H5C_t *)cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats() failed.")
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_reset_cache_hit_rate_stats() */
@@ -2033,124 +1810,84 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr,
- H5AC_cache_config_t *config_ptr)
+H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr, H5AC_cache_config_t *config_ptr)
{
- herr_t result;
- herr_t ret_value = SUCCEED; /* Return value */
H5C_auto_size_ctl_t internal_config;
#if H5AC__TRACE_FILE_ENABLED
H5AC_cache_config_t trace_config = H5AC__DEFAULT_CACHE_CONFIG;
FILE * trace_file_ptr = NULL;
#endif /* H5AC__TRACE_FILE_ENABLED */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- HDassert( cache_ptr );
+ /* Sanity checks */
+ HDassert(cache_ptr);
#if H5AC__TRACE_FILE_ENABLED
/* Make note of the new configuration. Don't look up the trace file
* pointer, as that may change before we use it.
*/
- if ( config_ptr != NULL ) {
-
+ if(config_ptr != NULL)
trace_config = *config_ptr;
-
- }
#endif /* H5AC__TRACE_FILE_ENABLED */
- if ( ( cache_ptr == NULL )
+ if((cache_ptr == NULL)
#ifdef H5_HAVE_PARALLEL
- ||
- ( ( cache_ptr->aux_ptr != NULL )
- &&
- (
- ((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic
- !=
- H5AC__H5AC_AUX_T_MAGIC
- )
- )
+ || ((cache_ptr->aux_ptr != NULL) &&
+ (((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic != H5AC__H5AC_AUX_T_MAGIC))
#endif /* H5_HAVE_PARALLEL */
- ) {
-
+ )
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "bad cache_ptr on entry.")
- }
-
- result = H5AC_validate_config(config_ptr);
-
- if ( result != SUCCEED ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Bad cache configuration");
- }
-
- if ( config_ptr->open_trace_file ) {
-
- FILE * file_ptr = NULL;
-
- if ( H5C_get_trace_file_ptr(cache_ptr, &file_ptr) < 0 ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_get_trace_file_ptr() failed.")
- }
+ /* Validate external configuration */
+ if(H5AC_validate_config(config_ptr) != SUCCEED)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad cache configuration");
- if ( ( ! ( config_ptr->close_trace_file ) ) &&
- ( file_ptr != NULL ) ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "Trace file already open.")
- }
- }
-
- if ( config_ptr->close_trace_file ) {
-
- if ( H5AC_close_trace_file(cache_ptr) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5AC_close_trace_file() failed.")
- }
- }
+ if(config_ptr->open_trace_file) {
+ FILE * file_ptr;
- if ( config_ptr->open_trace_file ) {
+ if(NULL == (file_ptr = H5C_get_trace_file_ptr(cache_ptr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_trace_file_ptr() failed.")
- if ( H5AC_open_trace_file(cache_ptr, config_ptr->trace_file_name) < 0 )
- {
+ if((!(config_ptr->close_trace_file)) && (file_ptr != NULL))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Trace file already open.")
+ } /* end if */
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "H5AC_open_trace_file() failed.")
- }
- }
+ /* Close & reopen trace file, if requested */
+ if(config_ptr->close_trace_file)
+ if(H5AC_close_trace_file(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_close_trace_file() failed.")
+ if(config_ptr->open_trace_file)
+ if(H5AC_open_trace_file(cache_ptr, config_ptr->trace_file_name) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "H5AC_open_trace_file() failed.")
- if(H5AC_ext_config_2_int_config(config_ptr, &internal_config) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_ext_config_2_int_config() failed.")
+ /* Convert external configuration to internal representation */
+ if(H5AC__ext_config_2_int_config(config_ptr, &internal_config) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC__ext_config_2_int_config() failed.")
+ /* Set configuration */
if(H5C_set_cache_auto_resize_config(cache_ptr, &internal_config) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_set_cache_auto_resize_config() failed.")
-
if(H5C_set_evictions_enabled(cache_ptr, config_ptr->evictions_enabled) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_set_evictions_enabled() failed.")
#ifdef H5_HAVE_PARALLEL
- if ( cache_ptr->aux_ptr != NULL ) {
-
- ((H5AC_aux_t *)(cache_ptr->aux_ptr))->dirty_bytes_threshold =
- config_ptr->dirty_bytes_threshold;
-
- ((H5AC_aux_t *)(cache_ptr->aux_ptr))->metadata_write_strategy =
- config_ptr->metadata_write_strategy;
- }
+ /* Set parallel configuration values */
+ /* (Which are only held in the H5AC layer -QAK) */
+ if(cache_ptr->aux_ptr != NULL) {
+ ((H5AC_aux_t *)(cache_ptr->aux_ptr))->dirty_bytes_threshold = config_ptr->dirty_bytes_threshold;
+ ((H5AC_aux_t *)(cache_ptr->aux_ptr))->metadata_write_strategy = config_ptr->metadata_write_strategy;
+ } /* end if */
#endif /* H5_HAVE_PARALLEL */
done:
-
#if H5AC__TRACE_FILE_ENABLED
/* For the set cache auto resize config call, only the contents
* of the config is necessary in the trace file. Write the return
* value to catch occult errors.
*/
- if ( ( cache_ptr != NULL ) &&
- ( H5C_get_trace_file_ptr(cache_ptr, &trace_file_ptr) >= 0 ) &&
- ( trace_file_ptr != NULL ) ) {
-
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr(cache_ptr)))
HDfprintf(trace_file_ptr,
"%s %d %d %d %d \"%s\" %d %d %d %f %d %d %ld %d %f %f %d %f %f %d %d %d %f %f %d %d %d %d %f %zu %d %d\n",
"H5AC_set_cache_auto_resize_config",
@@ -2185,11 +1922,9 @@ done:
trace_config.dirty_bytes_threshold,
trace_config.metadata_write_strategy,
(int)ret_value);
- }
#endif /* H5AC__TRACE_FILE_ENABLED */
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_set_cache_auto_resize_config() */
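
The function above is the internal path behind the public metadata-cache configuration calls. As a minimal sketch (assuming the public H5Pget_mdc_config()/H5Pset_mdc_config() property-list routines; the trace file name is a placeholder), an application would drive the trace-file fields shown in this change like so:

#include <string.h>
#include "hdf5.h"

/* Turn on the metadata cache trace file via a file access property list.
 * Field names follow H5AC_cache_config_t; values are illustrative.
 */
static herr_t
enable_mdc_trace(hid_t fapl_id)
{
    H5AC_cache_config_t config;

    /* The version field must be set before the get/set round trip */
    config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
    if(H5Pget_mdc_config(fapl_id, &config) < 0)
        return -1;

    config.open_trace_file = 1;                       /* hbool_t */
    strcpy(config.trace_file_name, "mdc_trace.log");  /* placeholder name */

    return H5Pset_mdc_config(fapl_id, &config);
}

Reading the current configuration first keeps every other field valid, which matters because the whole structure is run through H5AC_validate_config() before being applied.
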
@@ -2216,27 +1951,18 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_validate_config(H5AC_cache_config_t * config_ptr)
+H5AC_validate_config(H5AC_cache_config_t *config_ptr)
{
H5C_auto_size_ctl_t internal_config;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
+ /* Check args */
if(config_ptr == NULL)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "NULL config_ptr on entry.")
-
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "NULL config_ptr on entry.")
if(config_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Unknown config version.")
-
- if((config_ptr->rpt_fcn_enabled != TRUE) && (config_ptr->rpt_fcn_enabled != FALSE))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "config_ptr->rpt_fcn_enabled must be either TRUE or FALSE.")
-
- if((config_ptr->open_trace_file != TRUE) && (config_ptr->open_trace_file != FALSE))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "config_ptr->open_trace_file must be either TRUE or FALSE.")
-
- if((config_ptr->close_trace_file != TRUE) && (config_ptr->close_trace_file != FALSE))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "config_ptr->close_trace_file must be either TRUE or FALSE.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Unknown config version.")
/* don't bother to test trace_file_name unless open_trace_file is TRUE */
if(config_ptr->open_trace_file) {
@@ -2247,45 +1973,32 @@ H5AC_validate_config(H5AC_cache_config_t * config_ptr)
* sanity checks on the length of the file name.
*/
name_len = HDstrlen(config_ptr->trace_file_name);
+ if(name_len == 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "config_ptr->trace_file_name is empty.")
+ else if(name_len > H5AC__MAX_TRACE_FILE_NAME_LEN)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "config_ptr->trace_file_name too long.")
+ } /* end if */
- if(name_len == 0) {
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "config_ptr->trace_file_name is empty.")
- } else if(name_len > H5AC__MAX_TRACE_FILE_NAME_LEN) {
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "config_ptr->trace_file_name too long.")
- }
- }
-
- if ( ( config_ptr->evictions_enabled != TRUE ) &&
- ( config_ptr->evictions_enabled != FALSE ) ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "config_ptr->evictions_enabled must be either TRUE or FALSE.")
- }
-
- if ( ( config_ptr->evictions_enabled == FALSE ) &&
- ( ( config_ptr->incr_mode != H5C_incr__off ) ||
- ( config_ptr->flash_incr_mode != H5C_flash_incr__off ) ||
- ( config_ptr->decr_mode != H5C_decr__off ) ) ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "Can't disable evictions while auto-resize is enabled.")
- }
+ if((config_ptr->evictions_enabled == FALSE) &&
+ ((config_ptr->incr_mode != H5C_incr__off) ||
+ (config_ptr->flash_incr_mode != H5C_flash_incr__off) ||
+ (config_ptr->decr_mode != H5C_decr__off)))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Can't disable evictions while auto-resize is enabled.")
- if(config_ptr->dirty_bytes_threshold < H5AC__MIN_DIRTY_BYTES_THRESHOLD) {
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "dirty_bytes_threshold too small.")
- } else if(config_ptr->dirty_bytes_threshold > H5AC__MAX_DIRTY_BYTES_THRESHOLD) {
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "dirty_bytes_threshold too big.")
- }
+ if(config_ptr->dirty_bytes_threshold < H5AC__MIN_DIRTY_BYTES_THRESHOLD)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "dirty_bytes_threshold too small.")
+ else if(config_ptr->dirty_bytes_threshold > H5AC__MAX_DIRTY_BYTES_THRESHOLD)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "dirty_bytes_threshold too big.")
if((config_ptr->metadata_write_strategy != H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY) &&
(config_ptr->metadata_write_strategy != H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "config_ptr->metadata_write_strategy out of range.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "config_ptr->metadata_write_strategy out of range.")
- if(H5AC_ext_config_2_int_config(config_ptr, &internal_config) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_ext_config_2_int_config() failed.")
+ if(H5AC__ext_config_2_int_config(config_ptr, &internal_config) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC__ext_config_2_int_config() failed.")
if(H5C_validate_resize_config(&internal_config, H5C_RESIZE_CFG__VALIDATE_ALL) < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "error(s) in new config.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "error(s) in new config.")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -2309,44 +2022,29 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_close_trace_file(H5AC_t * cache_ptr)
-
+H5AC_close_trace_file(H5AC_t *cache_ptr)
{
- herr_t ret_value = SUCCEED; /* Return value */
FILE * trace_file_ptr = NULL;
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- if ( cache_ptr == NULL ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "NULL cache_ptr on entry.")
- }
-
- if ( H5C_get_trace_file_ptr(cache_ptr, &trace_file_ptr) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_get_trace_file_ptr() failed.")
- }
-
- if ( trace_file_ptr != NULL ) {
+ if(cache_ptr == NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "NULL cache_ptr on entry.")
- if ( H5C_set_trace_file_ptr(cache_ptr, NULL) < 0 ) {
+ if(NULL == (trace_file_ptr = H5C_get_trace_file_ptr(cache_ptr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_trace_file_ptr() failed.")
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_set_trace_file_ptr() failed.")
- }
-
- if ( HDfclose(trace_file_ptr) != 0 ) {
+ if(trace_file_ptr != NULL) {
+ if(H5C_set_trace_file_ptr(cache_ptr, NULL) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_set_trace_file_ptr() failed.")
- HGOTO_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, FAIL, \
- "can't close metadata cache trace file")
- }
- }
+ if(HDfclose(trace_file_ptr) != 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, FAIL, "can't close metadata cache trace file")
+ } /* end if */
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_close_trace_file() */
@@ -2367,99 +2065,57 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_open_trace_file(H5AC_t * cache_ptr,
- const char * trace_file_name)
+H5AC_open_trace_file(H5AC_t *cache_ptr, const char *trace_file_name)
{
- herr_t ret_value = SUCCEED; /* Return value */
char file_name[H5AC__MAX_TRACE_FILE_NAME_LEN + H5C__PREFIX_LEN + 2];
FILE * file_ptr = NULL;
#ifdef H5_HAVE_PARALLEL
H5AC_aux_t * aux_ptr = NULL;
#endif /* H5_HAVE_PARALLEL */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
HDassert(cache_ptr);
- if ( cache_ptr == NULL ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "cache_ptr NULL on entry.")
- }
-
- if ( trace_file_name == NULL ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "NULL trace_file_name on entry.")
- }
-
- if ( HDstrlen(trace_file_name) > H5AC__MAX_TRACE_FILE_NAME_LEN ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "trace file name too long.")
- }
-
- if ( H5C_get_trace_file_ptr(cache_ptr, &file_ptr) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_get_trace_file_ptr() failed.")
- }
-
- if ( file_ptr != NULL ) {
-
- HGOTO_ERROR(H5E_FILE, H5E_FILEOPEN, FAIL, "trace file already open.")
- }
+ /* Check args */
+ if(cache_ptr == NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "cache_ptr NULL on entry.")
+ if(trace_file_name == NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "NULL trace_file_name on entry.")
+ if(HDstrlen(trace_file_name) > H5AC__MAX_TRACE_FILE_NAME_LEN)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "trace file name too long.")
+ if(NULL != (file_ptr = H5C_get_trace_file_ptr(cache_ptr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_FILEOPEN, FAIL, "trace file already open.")
#ifdef H5_HAVE_PARALLEL
-
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
- if ( cache_ptr->aux_ptr == NULL ) {
-
+ if(cache_ptr->aux_ptr == NULL)
sprintf(file_name, "%s", trace_file_name);
-
- } else {
-
- if ( aux_ptr->magic != H5AC__H5AC_AUX_T_MAGIC ) {
-
+ else {
+ if(aux_ptr->magic != H5AC__H5AC_AUX_T_MAGIC)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad aux_ptr->magic.")
- }
sprintf(file_name, "%s.%d", trace_file_name, aux_ptr->mpi_rank);
+ } /* end else */
- }
-
- if ( HDstrlen(file_name) >
- H5AC__MAX_TRACE_FILE_NAME_LEN + H5C__PREFIX_LEN + 1 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "cooked trace file name too long.")
- }
-
+ if(HDstrlen(file_name) > H5AC__MAX_TRACE_FILE_NAME_LEN + H5C__PREFIX_LEN + 1)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "cooked trace file name too long.")
#else /* H5_HAVE_PARALLEL */
-
- HDsnprintf(file_name,
- (size_t)(H5AC__MAX_TRACE_FILE_NAME_LEN + H5C__PREFIX_LEN + 1),
+ HDsnprintf(file_name, (size_t)(H5AC__MAX_TRACE_FILE_NAME_LEN + H5C__PREFIX_LEN + 1),
"%s", trace_file_name);
-
#endif /* H5_HAVE_PARALLEL */
- if ( (file_ptr = HDfopen(file_name, "w")) == NULL ) {
-
- /* trace file open failed */
+ if((file_ptr = HDfopen(file_name, "w")) == NULL)
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "trace file open failed.")
- }
HDfprintf(file_ptr, "### HDF5 metadata cache trace file version 1 ###\n");
- if ( H5C_set_trace_file_ptr(cache_ptr, file_ptr) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_set_trace_file_ptr() failed.")
- }
+ if(H5C_set_trace_file_ptr(cache_ptr, file_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_set_trace_file_ptr() failed.")
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_open_trace_file() */
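
In parallel builds the code above appends the MPI rank to the requested trace file name so that each process writes its own trace. A standalone sketch of that naming pattern (plain MPI and stdio only; the helper name is hypothetical):

#include <mpi.h>
#include <stdio.h>

/* Open one trace file per MPI rank, named "<base>.<rank>" */
static FILE *
open_rank_trace_file(const char *base_name, MPI_Comm comm)
{
    char file_name[256];
    int  mpi_rank;

    MPI_Comm_rank(comm, &mpi_rank);
    snprintf(file_name, sizeof(file_name), "%s.%d", base_name, mpi_rank);

    return fopen(file_name, "w");
}
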
@@ -2483,8 +2139,7 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
herr_t
-H5AC_add_candidate(H5AC_t * cache_ptr,
- haddr_t addr)
+H5AC_add_candidate(H5AC_t * cache_ptr, haddr_t addr)
{
H5AC_aux_t * aux_ptr;
H5AC_slist_entry_t * slist_entry_ptr = NULL;
@@ -2492,36 +2147,31 @@ H5AC_add_candidate(H5AC_t * cache_ptr,
FUNC_ENTER_NOAPI(FAIL)
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
-
+ /* Sanity checks */
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
+ HDassert(aux_ptr->candidate_slist_ptr != NULL);
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
- HDassert( aux_ptr->candidate_slist_ptr != NULL );
-
- /* If the supplied address appears in the candidate list, scream and die. */
- if(NULL != H5SL_search(aux_ptr->candidate_slist_ptr, (void *)(&addr)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry already in candidate slist.")
-
- /* otherwise, construct an entry for the supplied address, and insert
+ /* Construct an entry for the supplied address, and insert
* it into the candidate slist.
*/
- if(NULL == (slist_entry_ptr = H5FL_CALLOC(H5AC_slist_entry_t)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "Can't allocate candidate slist entry .")
-
- slist_entry_ptr->magic = H5AC__H5AC_SLIST_ENTRY_T_MAGIC;
+ if(NULL == (slist_entry_ptr = H5FL_MALLOC(H5AC_slist_entry_t)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "Can't allocate candidate slist entry")
slist_entry_ptr->addr = addr;
if(H5SL_insert(aux_ptr->candidate_slist_ptr, slist_entry_ptr, &(slist_entry_ptr->addr)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "can't insert entry into dirty entry slist.")
-
- aux_ptr->candidate_slist_len += 1;
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "can't insert entry into dirty entry slist")
done:
+ /* Clean up on error */
+ if(ret_value < 0)
+ if(slist_entry_ptr)
+ slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_add_candidate() */
#endif /* H5_HAVE_PARALLEL */
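
H5AC_add_candidate() now frees the partially constructed slist entry when the insert fails instead of leaking it. The same allocate / insert / clean-up-on-error shape, reduced to a self-contained sketch with a toy linked list (all names hypothetical):

#include <stdlib.h>

typedef struct node_t { unsigned long addr; struct node_t *next; } node_t;
typedef struct list_t { node_t *head; } list_t;

/* Toy insert; returns 0 on success, -1 on failure */
static int
list_insert(list_t *list, node_t *node)
{
    if(list == NULL)
        return -1;
    node->next = list->head;
    list->head = node;
    return 0;
}

static int
add_candidate(list_t *list, unsigned long addr)
{
    node_t *node = NULL;
    int     ret_value = 0;

    if(NULL == (node = malloc(sizeof(node_t)))) {
        ret_value = -1;
        goto done;
    }
    node->addr = addr;

    /* After a successful insert the list owns the node */
    if(list_insert(list, node) < 0) {
        ret_value = -1;
        goto done;
    }

done:
    /* Clean up on error: only free a node that never entered the list */
    if(ret_value < 0 && node != NULL)
        free(node);
    return ret_value;
}
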
@@ -2568,33 +2218,22 @@ done:
*
* Programmer: John Mainzer, 5/30/14
*
- * Changes:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
#ifndef NDEBUG
herr_t
-H5AC_get_entry_ptr_from_addr(const H5F_t *f,
- haddr_t addr,
- void ** entry_ptr_ptr)
+H5AC_get_entry_ptr_from_addr(const H5F_t *f, haddr_t addr, void **entry_ptr_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- if ( H5C_get_entry_ptr_from_addr(f, addr, entry_ptr_ptr) < 0 )
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_get_entry_ptr_from_addr() failed.")
+ if(H5C_get_entry_ptr_from_addr(f, addr, entry_ptr_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_entry_ptr_from_addr() failed")
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_get_entry_ptr_from_addr() */
-
#endif /* NDEBUG */
@@ -2624,36 +2263,23 @@ done:
*
* Programmer: John Mainzer, 5/30/14
*
- * Changes:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
#ifndef NDEBUG
herr_t
-H5AC_verify_entry_type(const H5F_t *f,
- haddr_t addr,
- const H5AC_class_t * expected_type,
- hbool_t * in_cache_ptr,
- hbool_t * type_ok_ptr)
+H5AC_verify_entry_type(const H5F_t *f, haddr_t addr, const H5AC_class_t *expected_type,
+ hbool_t *in_cache_ptr, hbool_t *type_ok_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- if ( H5C_verify_entry_type(f, addr, (const H5C_class_t *)expected_type,
- in_cache_ptr, type_ok_ptr) < 0 )
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_verify_entry_type() failed.")
+ if(H5C_verify_entry_type(f, addr, expected_type, in_cache_ptr, type_ok_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_verify_entry_type() failed")
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_verify_entry_type() */
-
#endif /* NDEBUG */
@@ -2664,7 +2290,7 @@ done:
/*-------------------------------------------------------------------------
*
- * Function: H5AC_broadcast_candidate_list()
+ * Function: H5AC__broadcast_candidate_list()
*
* Purpose: Broadcast the contents of the process 0 candidate entry
* slist. In passing, also remove all entries from said
@@ -2688,98 +2314,125 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_broadcast_candidate_list(H5AC_t * cache_ptr,
- int * num_entries_ptr,
- haddr_t ** haddr_buf_ptr_ptr)
+H5AC__broadcast_candidate_list(H5AC_t *cache_ptr, int *num_entries_ptr,
+ haddr_t **haddr_buf_ptr_ptr)
{
- hbool_t success = FALSE;
H5AC_aux_t * aux_ptr = NULL;
haddr_t * haddr_buf_ptr = NULL;
- MPI_Offset * MPI_Offset_buf_ptr = NULL;
- size_t buf_size = 0;
int mpi_result;
- int chk_num_entries = 0;
- int num_entries = 0;
+ int num_entries;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->mpi_rank == 0 );
- HDassert( aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
- HDassert( aux_ptr->candidate_slist_ptr != NULL );
- HDassert( H5SL_count(aux_ptr->candidate_slist_ptr) ==
- (size_t)(aux_ptr->candidate_slist_len) );
- HDassert( num_entries_ptr != NULL );
- HDassert( *num_entries_ptr == 0 );
- HDassert( haddr_buf_ptr_ptr != NULL );
- HDassert( *haddr_buf_ptr_ptr == NULL );
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->mpi_rank == 0);
+ HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
+ HDassert(aux_ptr->candidate_slist_ptr != NULL);
+ HDassert(num_entries_ptr != NULL);
+ HDassert(*num_entries_ptr == 0);
+ HDassert(haddr_buf_ptr_ptr != NULL);
+ HDassert(*haddr_buf_ptr_ptr == NULL);
/* First broadcast the number of entries in the list so that the
* receivers can set up buffers to receive them. If there aren't
* any, we are done.
*/
- num_entries = aux_ptr->candidate_slist_len;
+ num_entries = (int)H5SL_count(aux_ptr->candidate_slist_ptr);
if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 1", mpi_result)
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
if(num_entries > 0) {
+ size_t buf_size = 0;
+ int chk_num_entries = 0;
+
/* convert the candidate list into the format we
* are used to receiving from process 0, and also load it
* into a buffer for transmission.
*/
- if(H5AC_copy_candidate_list_to_buffer(cache_ptr, &chk_num_entries,
- &haddr_buf_ptr, &buf_size, &MPI_Offset_buf_ptr) < 0)
+ if(H5AC__copy_candidate_list_to_buffer(cache_ptr, &chk_num_entries, &haddr_buf_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't construct candidate buffer.")
+ HDassert(chk_num_entries == num_entries);
+ HDassert(haddr_buf_ptr != NULL);
- HDassert( chk_num_entries == num_entries );
- HDassert( haddr_buf_ptr != NULL );
- HDassert( MPI_Offset_buf_ptr != NULL );
- HDassert( aux_ptr->candidate_slist_len == 0 );
-
- /* Now broadcast the list of candidate entries -- if there is one.
- *
- * The peculiar structure of the following call to MPI_Bcast is
- * due to MPI's (?) failure to believe in the MPI_Offset type.
- * Thus the element type is MPI_BYTE, with size equal to the
- * buf_size computed above.
- */
- if(MPI_SUCCESS != (mpi_result = MPI_Bcast((void *)MPI_Offset_buf_ptr, (int)buf_size, MPI_BYTE, 0, aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 2", mpi_result)
+ /* Now broadcast the list of candidate entries */
+ buf_size = sizeof(haddr_t) * (size_t)num_entries;
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast((void *)haddr_buf_ptr, (int)buf_size, MPI_BYTE, 0, aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
} /* end if */
- success = TRUE;
+ /* Pass the number of entries and the buffer pointer
+ * back to the caller. Do this so that we can use the same code
+ * to apply the candidate list to all the processes.
+ */
+ *num_entries_ptr = num_entries;
+ *haddr_buf_ptr_ptr = haddr_buf_ptr;
done:
- if(MPI_Offset_buf_ptr != NULL)
- MPI_Offset_buf_ptr = (MPI_Offset *)H5MM_xfree((void *)MPI_Offset_buf_ptr);
-
- if(success) {
- /* Pass the number of entries and the buffer pointer
- * back to the caller. Do this so that we can use the same code
- * to apply the candidate list to all the processes.
- */
- *num_entries_ptr = num_entries;
- *haddr_buf_ptr_ptr = haddr_buf_ptr;
- } else if(haddr_buf_ptr != NULL) {
- haddr_buf_ptr = (haddr_t *)H5MM_xfree((void *)haddr_buf_ptr);
- }
+ if(ret_value < 0)
+ if(haddr_buf_ptr)
+ haddr_buf_ptr = (haddr_t *)H5MM_xfree((void *)haddr_buf_ptr);
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_broadcast_candidate_list() */
+} /* H5AC__broadcast_candidate_list() */
#endif /* H5_HAVE_PARALLEL */
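
The broadcast above now ships the candidate addresses as a raw byte buffer in two steps, the count first and the payload second, with no intermediate MPI_Offset conversion. A standalone sketch of that pattern in plain MPI, with uint64_t standing in for haddr_t (helper name hypothetical; error handling omitted):

#include <mpi.h>
#include <stdint.h>
#include <stdlib.h>

/* Rank 0 supplies 'send_buf' with *num_entries_ptr addresses; the other
 * ranks allocate a matching buffer once the count has arrived.  On
 * non-zero ranks the returned buffer is malloc'd and the caller frees it.
 */
static uint64_t *
bcast_addr_list(uint64_t *send_buf, int *num_entries_ptr, MPI_Comm comm)
{
    uint64_t *buf = NULL;
    int       num_entries = *num_entries_ptr;
    int       mpi_rank;

    MPI_Comm_rank(comm, &mpi_rank);

    /* Step 1: broadcast the count so receivers can size their buffers */
    MPI_Bcast(&num_entries, 1, MPI_INT, 0, comm);

    if(num_entries > 0) {
        size_t buf_size = sizeof(uint64_t) * (size_t)num_entries;

        buf = (mpi_rank == 0) ? send_buf : malloc(buf_size);

        /* Step 2: broadcast the addresses themselves as raw bytes */
        MPI_Bcast(buf, (int)buf_size, MPI_BYTE, 0, comm);
    }

    *num_entries_ptr = num_entries;
    return buf;
}

Broadcasting the list as MPI_BYTE presumes a homogeneous haddr_t representation across ranks, the same working assumption the rewritten code makes.
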
/*-------------------------------------------------------------------------
*
- * Function: H5AC_broadcast_clean_list()
+ * Function: H5AC__broadcast_clean_list_cb()
+ *
+ * Purpose: Skip list callback for building array of addresses for
+ * broadcasting the clean list.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: Quincey Koziol, 6/12/15
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5_HAVE_PARALLEL
+static herr_t
+H5AC__broadcast_clean_list_cb(void *_item, void H5_ATTR_UNUSED *_key,
+ void *_udata)
+{
+ H5AC_slist_entry_t * slist_entry_ptr = (H5AC_slist_entry_t *)_item; /* Address of item */
+ H5AC_addr_list_ud_t * udata = (H5AC_addr_list_ud_t *)_udata; /* Context for callback */
+ haddr_t addr;
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(slist_entry_ptr);
+ HDassert(udata);
+
+ /* Store the entry's address in the buffer */
+ addr = slist_entry_ptr->addr;
+ udata->addr_buf_ptr[udata->i] = addr;
+ udata->i++;
+
+ /* now release the entry */
+ slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
+
+ /* and also remove the matching entry from the dirtied list
+ * if it exists.
+ */
+ if(NULL != (slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(udata->aux_ptr->d_slist_ptr, (void *)(&addr))))
+ slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5AC__broadcast_clean_list_cb() */
+#endif /* H5_HAVE_PARALLEL */
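
The callback above does the per-item work for the single-pass teardown shown in the next function; a combined sketch of the idiom follows H5AC__broadcast_clean_list() below.
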
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC__broadcast_clean_list()
*
* Purpose: Broadcast the contents of the process 0 cleaned entry
* slist. In passing, also remove all entries from said
@@ -2799,174 +2452,76 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_broadcast_clean_list(H5AC_t * cache_ptr)
+H5AC__broadcast_clean_list(H5AC_t * cache_ptr)
{
- herr_t ret_value = SUCCEED; /* Return value */
- haddr_t addr;
haddr_t * addr_buf_ptr = NULL;
- H5AC_aux_t * aux_ptr = NULL;
- H5SL_node_t * slist_node_ptr = NULL;
- H5AC_slist_entry_t * slist_entry_ptr = NULL;
- MPI_Offset * buf_ptr = NULL;
- size_t buf_size;
- int i = 0;
+ H5AC_aux_t * aux_ptr;
int mpi_result;
int num_entries = 0;
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr;
-
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->mpi_rank == 0 );
- HDassert( aux_ptr->c_slist_ptr != NULL );
- HDassert( H5SL_count(aux_ptr->c_slist_ptr) ==
- (size_t)(aux_ptr->c_slist_len) );
-
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->mpi_rank == 0);
+ HDassert(aux_ptr->c_slist_ptr != NULL);
/* First broadcast the number of entries in the list so that the
* receivers can set up a buffer to receive them. If there aren't
* any, we are done.
*/
- num_entries = aux_ptr->c_slist_len;
-
- mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, aux_ptr->mpi_comm);
-
- if ( mpi_result != MPI_SUCCESS ) {
-
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 1", mpi_result)
+ num_entries = (int)H5SL_count(aux_ptr->c_slist_ptr);
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
- }
+ if(num_entries > 0) {
+ H5AC_addr_list_ud_t udata;
+ size_t buf_size;
- if ( num_entries > 0 )
- {
/* allocate a buffer to store the list of entry base addresses in */
+ buf_size = sizeof(haddr_t) * (size_t)num_entries;
+ if(NULL == (addr_buf_ptr = (haddr_t *)H5MM_malloc(buf_size)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for addr buffer")
+
+ /* Set up user data for callback */
+ udata.aux_ptr = aux_ptr;
+ udata.addr_buf_ptr = addr_buf_ptr;
+ udata.i = 0;
+
+ /* Free all the clean list entries, building the address list in the callback */
+ /* (Callback also removes the matching entries from the dirtied list) */
+ if(H5SL_free(aux_ptr->c_slist_ptr, H5AC__broadcast_clean_list_cb, &udata) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "Can't build address list for clean entries")
+
+ /* Now broadcast the list of cleaned entries */
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast((void *)addr_buf_ptr, (int)buf_size, MPI_BYTE, 0, aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
+ } /* end if */
- buf_size = sizeof(MPI_Offset) * (size_t)num_entries;
-
- buf_ptr = (MPI_Offset *)H5MM_malloc(buf_size);
-
- if ( buf_ptr == NULL ) {
-
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
- "memory allocation failed for clean entry buffer")
- }
-
- /* if the sync_point_done callback is defined, allocate the
- * addr buffer as well.
- */
- if ( aux_ptr->sync_point_done != NULL ) {
-
- addr_buf_ptr = (haddr_t *)H5MM_malloc((size_t)num_entries * sizeof(haddr_t));
-
- if ( addr_buf_ptr == NULL ) {
-
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
- "memory allocation failed for addr buffer")
- }
- }
-
-
- /* now load the entry base addresses into the buffer, emptying the
- * cleaned entry list in passing
- */
-
- while ( NULL != (slist_node_ptr = H5SL_first(aux_ptr->c_slist_ptr) ) )
- {
- slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_item(slist_node_ptr);
-
- HDassert(slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
-
- HDassert( i < num_entries );
-
- addr = slist_entry_ptr->addr;
-
- if ( addr_buf_ptr != NULL ) {
-
- addr_buf_ptr[i] = addr;
- }
-
- if ( H5FD_mpi_haddr_to_MPIOff(addr, &(buf_ptr[i])) < 0 ) {
-
- HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, \
- "can't convert from haddr to MPI off")
- }
-
- i++;
-
- /* now remove the entry from the cleaned entry list */
- if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
- != slist_entry_ptr ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
- "Can't delete entry from cleaned entry slist.")
- }
-
- slist_entry_ptr->magic = 0;
- slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
-
- aux_ptr->c_slist_len -= 1;
-
- HDassert( aux_ptr->c_slist_len >= 0 );
-
- /* and also remove the matching entry from the dirtied list
- * if it exists.
- */
- if((slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_search(aux_ptr->d_slist_ptr, (void *)(&addr))) != NULL) {
- HDassert( slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC );
- HDassert( slist_entry_ptr->addr == addr );
-
- if(H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr)) != slist_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, "Can't delete entry from dirty entry slist.")
-
- slist_entry_ptr->magic = 0;
- slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
-
- aux_ptr->d_slist_len -= 1;
-
- HDassert( aux_ptr->d_slist_len >= 0 );
- } /* end if */
- } /* while */
-
-
- /* Now broadcast the list of cleaned entries -- if there is one.
- *
- * The peculiar structure of the following call to MPI_Bcast is
- * due to MPI's (?) failure to believe in the MPI_Offset type.
- * Thus the element type is MPI_BYTE, with size equal to the
- * buf_size computed above.
- */
-
- mpi_result = MPI_Bcast((void *)buf_ptr, (int)buf_size, MPI_BYTE, 0,
- aux_ptr->mpi_comm);
-
- if ( mpi_result != MPI_SUCCESS ) {
-
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 2", mpi_result)
- }
- }
-
- if(aux_ptr->sync_point_done != NULL)
+ /* if it is defined, call the sync point done callback. Note
+ * that this callback is defined purely for testing purposes,
+ * and should be undefined under normal operating circumstances.
+ */
+ if(aux_ptr->sync_point_done)
(aux_ptr->sync_point_done)(num_entries, addr_buf_ptr);
done:
- if(buf_ptr != NULL)
- buf_ptr = (MPI_Offset *)H5MM_xfree((void *)buf_ptr);
- if(addr_buf_ptr != NULL)
+ if(addr_buf_ptr)
addr_buf_ptr = (haddr_t *)H5MM_xfree((void *)addr_buf_ptr);
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_broadcast_clean_list() */
+} /* H5AC__broadcast_clean_list() */
#endif /* H5_HAVE_PARALLEL */
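
Both list broadcasts in this change replace node-by-node search/remove loops with a single H5SL_free() pass that hands every item to a callback. The idiom, reduced to a self-contained sketch over a plain singly linked list (all names hypothetical):

#include <stdlib.h>

typedef struct node_t { unsigned long addr; struct node_t *next; } node_t;

typedef struct {
    unsigned long *addr_buf;   /* output array of addresses */
    size_t         i;          /* next free slot in addr_buf */
} addr_list_ud_t;

/* Callback run on every node while the list is torn down */
static void
collect_addr_cb(node_t *node, void *_udata)
{
    addr_list_ud_t *udata = (addr_list_ud_t *)_udata;

    udata->addr_buf[udata->i++] = node->addr;
}

/* Walk the list once: invoke the callback on each node, then free it.
 * This mirrors H5SL_free(slist, cb, udata), which empties the skip list
 * in a single pass instead of repeated search/remove calls.
 */
static void
list_free(node_t *head, void (*cb)(node_t *, void *), void *udata)
{
    while(head != NULL) {
        node_t *next = head->next;

        cb(head, udata);
        free(head);
        head = next;
    }
}
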
/*-------------------------------------------------------------------------
*
- * Function: H5AC_check_if_write_permitted
+ * Function: H5AC__check_if_write_permitted
*
* Purpose: Determine if a write is permitted under the current
* circumstances, and set *write_permitted_ptr accordingly.
@@ -2983,62 +2538,44 @@ done:
*
*-------------------------------------------------------------------------
*/
-#ifdef H5_HAVE_PARALLEL
-static herr_t
-H5AC_check_if_write_permitted(const H5F_t *f,
- hid_t H5_ATTR_UNUSED dxpl_id,
- hbool_t * write_permitted_ptr)
-#else /* H5_HAVE_PARALLEL */
static herr_t
-H5AC_check_if_write_permitted(const H5F_t H5_ATTR_UNUSED * f,
- hid_t H5_ATTR_UNUSED dxpl_id,
- hbool_t * write_permitted_ptr)
+H5AC__check_if_write_permitted(const H5F_t
+#ifndef H5_HAVE_PARALLEL
+H5_ATTR_UNUSED
#endif /* H5_HAVE_PARALLEL */
+ *f, hbool_t *write_permitted_ptr)
{
- hbool_t write_permitted = TRUE;
- herr_t ret_value = SUCCEED; /* Return value */
#ifdef H5_HAVE_PARALLEL
H5AC_aux_t * aux_ptr = NULL;
#endif /* H5_HAVE_PARALLEL */
+ hbool_t write_permitted = TRUE;
-
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_STATIC_NOERR
#ifdef H5_HAVE_PARALLEL
- HDassert( f != NULL );
- HDassert( f->shared != NULL );
- HDassert( f->shared->cache != NULL );
-
+ /* Sanity checks */
+ HDassert(f != NULL);
+ HDassert(f->shared != NULL);
+ HDassert(f->shared->cache != NULL);
aux_ptr = (H5AC_aux_t *)(f->shared->cache->aux_ptr);
+ if(aux_ptr != NULL) {
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
- if ( aux_ptr != NULL ) {
-
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
-
- if ( ( aux_ptr->mpi_rank == 0 ) ||
- ( aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED ) ) {
-
+ if((aux_ptr->mpi_rank == 0) || (aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED))
write_permitted = aux_ptr->write_permitted;
-
- } else {
-
+ else
write_permitted = FALSE;
- }
- }
+ } /* end if */
#endif /* H5_HAVE_PARALLEL */
*write_permitted_ptr = write_permitted;
-done:
-
- FUNC_LEAVE_NOAPI(ret_value)
-
-} /* H5AC_check_if_write_permitted() */
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5AC__check_if_write_permitted() */
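
The test above collapses to a small rule: in serial builds writes are always permitted; in parallel builds only process 0, or any process under the distributed write strategy, may write, and then only while the aux structure's write_permitted flag is raised for a sync point. A condensed restatement (hypothetical enum and helper, not HDF5 API):

#include <stdbool.h>

typedef enum {
    WRITE_STRATEGY_PROCESS_0_ONLY,
    WRITE_STRATEGY_DISTRIBUTED
} write_strategy_t;

/* true  => this process may flush dirty metadata right now
 * false => it must wait for process 0 / the next sync point
 */
static bool
write_permitted(bool have_aux, int mpi_rank, write_strategy_t strategy,
    bool sync_point_write_ok)
{
    if(!have_aux)                       /* serial case: always allowed */
        return true;
    if(mpi_rank == 0 || strategy == WRITE_STRATEGY_DISTRIBUTED)
        return sync_point_write_ok;     /* aux_ptr->write_permitted */
    return false;
}
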
/*-------------------------------------------------------------------------
- * Function: H5AC_construct_candidate_list()
+ * Function: H5AC__construct_candidate_list()
*
* Purpose: In the parallel case when the metadata_write_strategy is
* H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, process 0 uses
@@ -3058,29 +2595,26 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_construct_candidate_list(H5AC_t * cache_ptr,
- H5AC_aux_t * aux_ptr,
- int sync_point_op)
+H5AC__construct_candidate_list(H5AC_t *cache_ptr, H5AC_aux_t *aux_ptr,
+ int sync_point_op)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_STATIC
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
- HDassert( ( sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_CACHE ) ||
- ( aux_ptr->mpi_rank == 0 ) );
- HDassert( aux_ptr->d_slist_ptr != NULL );
- HDassert( aux_ptr->c_slist_ptr != NULL );
- HDassert( aux_ptr->c_slist_len == 0 );
- HDassert( aux_ptr->candidate_slist_ptr != NULL );
- HDassert( aux_ptr->candidate_slist_len == 0 );
- HDassert( ( sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN ) ||
- ( sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_CACHE ) );
+ /* Sanity checks */
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
+ HDassert((sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_CACHE) || (aux_ptr->mpi_rank == 0));
+ HDassert(aux_ptr->d_slist_ptr != NULL);
+ HDassert(aux_ptr->c_slist_ptr != NULL);
+ HDassert(H5SL_count(aux_ptr->c_slist_ptr) == 0);
+ HDassert(aux_ptr->candidate_slist_ptr != NULL);
+ HDassert(H5SL_count(aux_ptr->candidate_slist_ptr) == 0);
+ HDassert((sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) || (sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_CACHE));
switch(sync_point_op) {
case H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN:
@@ -3100,13 +2634,52 @@ H5AC_construct_candidate_list(H5AC_t * cache_ptr,
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_construct_candidate_list() */
+} /* H5AC__construct_candidate_list() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
*
- * Function: H5AC_copy_candidate_list_to_buffer
+ * Function: H5AC__copy_candidate_list_to_buffer_cb
+ *
+ * Purpose: Skip list callback for building array of addresses for
+ * broadcasting the candidate list.
+ *
+ * Return: Return SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: Quincey Koziol, 6/12/15
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5_HAVE_PARALLEL
+static herr_t
+H5AC__copy_candidate_list_to_buffer_cb(void *_item, void H5_ATTR_UNUSED *_key,
+ void *_udata)
+{
+ H5AC_slist_entry_t * slist_entry_ptr = (H5AC_slist_entry_t *)_item; /* Address of item */
+ H5AC_addr_list_ud_t * udata = (H5AC_addr_list_ud_t *)_udata; /* Context for callback */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(slist_entry_ptr);
+ HDassert(udata);
+
+ /* Store the entry's address in the buffer */
+ udata->addr_buf_ptr[udata->i] = slist_entry_ptr->addr;
+ udata->i++;
+
+ /* now release the entry */
+ slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5AC__copy_candidate_list_to_buffer_cb() */
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC__copy_candidate_list_to_buffer
*
* Purpose: Allocate a buffer and copy the contents of the candidate
* entry slist into it. In passing, remove all
@@ -3137,123 +2710,68 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_copy_candidate_list_to_buffer(H5AC_t * cache_ptr,
- int * num_entries_ptr,
- haddr_t ** haddr_buf_ptr_ptr,
- size_t * MPI_Offset_buf_size_ptr,
- MPI_Offset ** MPI_Offset_buf_ptr_ptr)
+H5AC__copy_candidate_list_to_buffer(const H5AC_t *cache_ptr, int *num_entries_ptr,
+ haddr_t **haddr_buf_ptr_ptr)
{
- herr_t ret_value = SUCCEED; /* Return value */
- hbool_t success = FALSE;
- haddr_t addr;
H5AC_aux_t * aux_ptr = NULL;
- H5SL_node_t * slist_node_ptr = NULL;
- H5AC_slist_entry_t * slist_entry_ptr = NULL;
- MPI_Offset * MPI_Offset_buf_ptr = NULL;
+ H5AC_addr_list_ud_t udata;
haddr_t * haddr_buf_ptr = NULL;
size_t buf_size;
- int i = 0;
int num_entries = 0;
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
+ HDassert(aux_ptr->candidate_slist_ptr != NULL);
+ HDassert(H5SL_count(aux_ptr->candidate_slist_ptr) > 0);
+ HDassert(num_entries_ptr != NULL);
+ HDassert(*num_entries_ptr == 0);
+ HDassert(haddr_buf_ptr_ptr != NULL);
+ HDassert(*haddr_buf_ptr_ptr == NULL);
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
- HDassert( aux_ptr->candidate_slist_ptr != NULL );
- HDassert( H5SL_count(aux_ptr->candidate_slist_ptr) ==
- (size_t)(aux_ptr->candidate_slist_len) );
- HDassert( aux_ptr->candidate_slist_len > 0 );
- HDassert( num_entries_ptr != NULL );
- HDassert( *num_entries_ptr == 0 );
- HDassert( haddr_buf_ptr_ptr != NULL );
- HDassert( *haddr_buf_ptr_ptr == NULL );
-
- num_entries = aux_ptr->candidate_slist_len;
+ num_entries = (int)H5SL_count(aux_ptr->candidate_slist_ptr);
/* allocate a buffer to store the list of candidate entry
* base addresses in
*/
- if(MPI_Offset_buf_ptr_ptr != NULL) {
- HDassert( MPI_Offset_buf_size_ptr != NULL );
+ buf_size = sizeof(haddr_t) * (size_t)num_entries;
+ if(NULL == (haddr_buf_ptr = (haddr_t *)H5MM_malloc(buf_size)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for haddr buffer")
- /* allocate a buffer of MPI_Offset */
- buf_size = sizeof(MPI_Offset) * (size_t)num_entries;
- if(NULL == (MPI_Offset_buf_ptr = (MPI_Offset *)H5MM_malloc(buf_size)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for MPI_Offset buffer")
- } /* end if */
+ /* Set up user data for callback */
+ udata.aux_ptr = aux_ptr;
+ udata.addr_buf_ptr = haddr_buf_ptr;
+ udata.i = 0;
- /* allocate a buffer of haddr_t */
- if(NULL == (haddr_buf_ptr = (haddr_t *)H5MM_malloc(sizeof(haddr_t) * (size_t)num_entries)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for haddr buffer")
+ /* Free all the candidate list entries, building the address list in the callback */
+ if(H5SL_free(aux_ptr->candidate_slist_ptr, H5AC__copy_candidate_list_to_buffer_cb, &udata) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "Can't build address list for candidate entries")
- /* now load the entry base addresses into the buffer, emptying the
- * candidate entry list in passing
+ /* Pass the number of entries and the buffer pointer
+ * back to the caller.
*/
- while(NULL != (slist_node_ptr = H5SL_first(aux_ptr->candidate_slist_ptr))) {
- slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_item(slist_node_ptr);
-
- HDassert(slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
- HDassert( i < num_entries );
-
- addr = slist_entry_ptr->addr;
- haddr_buf_ptr[i] = addr;
- if(MPI_Offset_buf_ptr != NULL) {
- if(H5FD_mpi_haddr_to_MPIOff(addr, &(MPI_Offset_buf_ptr[i])) < 0)
- HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't convert from haddr to MPI off")
- } /* end if */
-
- i++;
-
- /* now remove the entry from the cleaned entry list */
- if(H5SL_remove(aux_ptr->candidate_slist_ptr, (void *)(&addr)) != slist_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, "Can't delete entry from candidate entry slist.")
-
- slist_entry_ptr->magic = 0;
- slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
-
- aux_ptr->candidate_slist_len -= 1;
-
- HDassert( aux_ptr->candidate_slist_len >= 0 );
- } /* while */
- HDassert( aux_ptr->candidate_slist_len == 0 );
-
- success = TRUE;
+ *num_entries_ptr = num_entries;
+ *haddr_buf_ptr_ptr = haddr_buf_ptr;
done:
- if(success) {
- /* Pass the number of entries and the buffer pointer
- * back to the caller.
- */
- *num_entries_ptr = num_entries;
- *haddr_buf_ptr_ptr = haddr_buf_ptr;
-
- if(MPI_Offset_buf_ptr_ptr != NULL) {
- HDassert( MPI_Offset_buf_ptr != NULL);
- *MPI_Offset_buf_size_ptr = buf_size;
- *MPI_Offset_buf_ptr_ptr = MPI_Offset_buf_ptr;
- } /* end if */
- } /* end if */
- else {
- if(MPI_Offset_buf_ptr != NULL)
- MPI_Offset_buf_ptr = (MPI_Offset *)H5MM_xfree((void *)MPI_Offset_buf_ptr);
- if(haddr_buf_ptr != NULL)
+ if(ret_value < 0)
+ if(haddr_buf_ptr)
haddr_buf_ptr = (haddr_t *)H5MM_xfree((void *)haddr_buf_ptr);
- } /* end else */
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_copy_candidate_list_to_buffer() */
+} /* H5AC__copy_candidate_list_to_buffer() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
- * Function: H5AC_ext_config_2_int_config()
+ * Function: H5AC__ext_config_2_int_config()
*
* Purpose: Utility function to translate an instance of
* H5AC_cache_config_t to an instance of H5C_auto_size_ctl_t.
@@ -3271,30 +2789,22 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5AC_ext_config_2_int_config(H5AC_cache_config_t * ext_conf_ptr,
- H5C_auto_size_ctl_t * int_conf_ptr)
+H5AC__ext_config_2_int_config(H5AC_cache_config_t *ext_conf_ptr,
+ H5C_auto_size_ctl_t *int_conf_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_STATIC
- if ( ( ext_conf_ptr == NULL ) ||
- ( ext_conf_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION ) ||
- ( int_conf_ptr == NULL ) ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Bad ext_conf_ptr or inf_conf_ptr on entry.")
- }
+ if((ext_conf_ptr == NULL) || (ext_conf_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION) ||
+ (int_conf_ptr == NULL))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad ext_conf_ptr or int_conf_ptr on entry.")
int_conf_ptr->version = H5C__CURR_AUTO_SIZE_CTL_VER;
-
- if ( ext_conf_ptr->rpt_fcn_enabled ) {
-
+ if(ext_conf_ptr->rpt_fcn_enabled)
int_conf_ptr->rpt_fcn = H5C_def_auto_resize_rpt_fcn;
-
- } else {
-
+ else
int_conf_ptr->rpt_fcn = NULL;
- }
int_conf_ptr->set_initial_size = ext_conf_ptr->set_initial_size;
int_conf_ptr->initial_size = ext_conf_ptr->initial_size;
@@ -3323,18 +2833,17 @@ H5AC_ext_config_2_int_config(H5AC_cache_config_t * ext_conf_ptr,
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_ext_config_2_int_config() */
+} /* H5AC__ext_config_2_int_config() */
/*-------------------------------------------------------------------------
*
- * Function: H5AC_log_deleted_entry()
+ * Function: H5AC__log_deleted_entry()
*
- * Purpose: Log an entry for which H5C__DELETED_FLAG has been set.
+ * Purpose: Log an entry which has been deleted.
*
- * If mpi_rank is 0, we must make sure that the entry doesn't
- * appear in the cleaned or dirty entry lists. Otherwise,
- * we have nothing to do.
+ * Only called for mpi_rank 0. We must make sure that the entry
+ * doesn't appear in the cleaned or dirty entry lists.
*
* Return SUCCEED on success, and FAIL on failure.
*
@@ -3346,80 +2855,48 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_log_deleted_entry(H5AC_t * cache_ptr,
- H5AC_info_t * entry_ptr,
- haddr_t addr,
- unsigned int flags)
+H5AC__log_deleted_entry(const H5AC_info_t *entry_ptr)
{
+ H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
H5AC_slist_entry_t * slist_entry_ptr = NULL;
- herr_t ret_value = SUCCEED; /* Return value */
+ haddr_t addr;
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ FUNC_ENTER_STATIC_NOERR
+ /* Sanity checks */
+ HDassert(entry_ptr);
+ addr = entry_ptr->addr;
+ cache_ptr = entry_ptr->cache_ptr;
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->mpi_rank == 0);
+ HDassert(aux_ptr->d_slist_ptr != NULL);
+ HDassert(aux_ptr->c_slist_ptr != NULL);
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
-
- HDassert( entry_ptr != NULL );
- HDassert( entry_ptr->addr == addr );
-
- HDassert( (flags & H5C__DELETED_FLAG) != 0 );
-
- if(aux_ptr->mpi_rank == 0) {
- HDassert( aux_ptr->d_slist_ptr != NULL );
- HDassert( aux_ptr->c_slist_ptr != NULL );
-
- /* if the entry appears in the dirtied entry slist, remove it. */
- if((slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_search(aux_ptr->d_slist_ptr, (void *)(&addr))) != NULL) {
- HDassert(slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
- HDassert(slist_entry_ptr->addr == addr);
-
- if(H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr)) != slist_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, "Can't delete entry from dirty entry slist.")
-
- slist_entry_ptr->magic = 0;
- slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
-
- aux_ptr->d_slist_len -= 1;
-
- HDassert( aux_ptr->d_slist_len >= 0 );
- } /* end if */
-
- /* if the entry appears in the cleaned entry slist, remove it. */
- if((slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_search(aux_ptr->c_slist_ptr, (void *)(&addr))) != NULL) {
- HDassert(slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
- HDassert(slist_entry_ptr->addr == addr);
-
- if(H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr)) != slist_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, "Can't delete entry from cleaned entry slist.")
-
- slist_entry_ptr->magic = 0;
- slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
-
- aux_ptr->c_slist_len -= 1;
+ /* if the entry appears in the dirtied entry slist, remove it. */
+ if(NULL != (slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr))))
+ slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
- HDassert( aux_ptr->c_slist_len >= 0 );
- } /* end if */
- } /* if */
+ /* if the entry appears in the cleaned entry slist, remove it. */
+ if(NULL != (slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))))
+ slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_log_deleted_entry() */
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5AC__log_deleted_entry() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
*
- * Function: H5AC_log_dirtied_entry()
+ * Function: H5AC__log_dirtied_entry()
*
* Purpose: Update the dirty_bytes count for a newly dirtied entry.
*
- * If mpi_rank isnt 0, this simply means adding the size
+ * If mpi_rank isn't 0, this simply means adding the size
* of the entries to the dirty_bytes count.
*
* If mpi_rank is 0, we must first check to see if the entry
@@ -3438,103 +2915,73 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_log_dirtied_entry(const H5AC_info_t * entry_ptr,
- haddr_t addr)
+H5AC__log_dirtied_entry(const H5AC_info_t *entry_ptr)
{
H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( entry_ptr );
- HDassert( entry_ptr->addr == addr );
- HDassert( entry_ptr->is_dirty == FALSE );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->is_dirty == FALSE);
cache_ptr = entry_ptr->cache_ptr;
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
-
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
-
- if ( aux_ptr->mpi_rank == 0 ) {
- H5AC_slist_entry_t * slist_entry_ptr;
-
- HDassert( aux_ptr->d_slist_ptr != NULL );
- HDassert( aux_ptr->c_slist_ptr != NULL );
+ if(aux_ptr->mpi_rank == 0) {
+ H5AC_slist_entry_t *slist_entry_ptr;
+ haddr_t addr = entry_ptr->addr;
- if ( H5SL_search(aux_ptr->d_slist_ptr, (void *)(&addr)) == NULL ) {
+ /* Sanity checks */
+ HDassert(aux_ptr->d_slist_ptr != NULL);
+ HDassert(aux_ptr->c_slist_ptr != NULL);
+ if(NULL == H5SL_search(aux_ptr->d_slist_ptr, (void *)(&addr))) {
/* insert the address of the entry in the dirty entry list, and
* add its size to the dirty_bytes count.
*/
- if ( NULL == (slist_entry_ptr = H5FL_CALLOC(H5AC_slist_entry_t)) ) {
-
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
- "Can't allocate dirty slist entry .")
- }
-
- slist_entry_ptr->magic = H5AC__H5AC_SLIST_ENTRY_T_MAGIC;
+ if(NULL == (slist_entry_ptr = H5FL_MALLOC(H5AC_slist_entry_t)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "Can't allocate dirty slist entry.")
slist_entry_ptr->addr = addr;
- if ( H5SL_insert(aux_ptr->d_slist_ptr, slist_entry_ptr,
- &(slist_entry_ptr->addr)) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, \
- "can't insert entry into dirty entry slist.")
- }
+ if(H5SL_insert(aux_ptr->d_slist_ptr, slist_entry_ptr, &(slist_entry_ptr->addr)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "can't insert entry into dirty entry slist.")
- aux_ptr->d_slist_len += 1;
aux_ptr->dirty_bytes += entry_ptr->size;
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
aux_ptr->unprotect_dirty_bytes += entry_ptr->size;
aux_ptr->unprotect_dirty_bytes_updates += 1;
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
- }
-
- if(H5SL_search(aux_ptr->c_slist_ptr, (void *)(&addr)) != NULL) {
- /* the entry is dirty. If it exists on the cleaned entries list,
- * remove it.
- */
- if((slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_search(aux_ptr->c_slist_ptr, (void *)(&addr))) != NULL) {
- HDassert(slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
- HDassert(slist_entry_ptr->addr == addr);
-
- if(H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr)) != slist_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, "Can't delete entry from clean entry slist.")
-
- slist_entry_ptr->magic = 0;
- slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
-
- aux_ptr->c_slist_len -= 1;
-
- HDassert( aux_ptr->c_slist_len >= 0 );
- } /* end if */
} /* end if */
- } else {
+ /* the entry is dirty. If it exists on the cleaned entries list,
+ * remove it.
+ */
+ if(NULL != (slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))))
+ slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
+ } /* end if */
+ else {
aux_ptr->dirty_bytes += entry_ptr->size;
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
aux_ptr->unprotect_dirty_bytes += entry_ptr->size;
aux_ptr->unprotect_dirty_bytes_updates += 1;
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
- }
+ } /* end else */
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
-} /* H5AC_log_dirtied_entry() */
+} /* H5AC__log_dirtied_entry() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
*
- * Function: H5AC_log_flushed_entry()
+ * Function: H5AC__log_flushed_entry()
*
* Purpose: Update the clean entry slist for the flush of an entry --
* specifically, if the entry has been cleared, remove it
@@ -3555,117 +3002,58 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_log_flushed_entry(H5C_t * cache_ptr,
- haddr_t addr,
- hbool_t was_dirty,
- unsigned flags,
- int H5_ATTR_UNUSED type_id)
+H5AC__log_flushed_entry(H5C_t *cache_ptr, haddr_t addr, hbool_t was_dirty,
+ unsigned flags)
{
- herr_t ret_value = SUCCEED; /* Return value */
hbool_t cleared;
H5AC_aux_t * aux_ptr;
H5AC_slist_entry_t * slist_entry_ptr = NULL;
+ herr_t ret_value = SUCCEED; /* Return value */
+ FUNC_ENTER_STATIC
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
-
+ /* Sanity check */
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->mpi_rank == 0);
+ HDassert(aux_ptr->c_slist_ptr != NULL);
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->mpi_rank == 0 );
- HDassert( aux_ptr->c_slist_ptr != NULL );
-
- cleared = ( (flags & H5C__FLUSH_CLEAR_ONLY_FLAG) != 0 );
-
- if ( cleared ) {
+ /* Set local flags */
+ cleared = ((flags & H5C__FLUSH_CLEAR_ONLY_FLAG) != 0);
+ if(cleared) {
/* If the entry has been cleared, must remove it from both the
* cleaned list and the dirtied list.
*/
-
- if ( (slist_entry_ptr = (H5AC_slist_entry_t *)
- H5SL_search(aux_ptr->c_slist_ptr,
- (void *)(&addr))) != NULL ) {
-
- HDassert( slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
- HDassert( slist_entry_ptr->addr == addr );
-
- if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
- != slist_entry_ptr ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
- "Can't delete entry from clean entry slist.")
- }
-
- slist_entry_ptr->magic = 0;
+ if(NULL != (slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))))
slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
-
- aux_ptr->c_slist_len -= 1;
-
- HDassert( aux_ptr->c_slist_len >= 0 );
- }
-
- if ( (slist_entry_ptr = (H5AC_slist_entry_t *)
- H5SL_search(aux_ptr->d_slist_ptr, (void *)(&addr))) != NULL ) {
-
- HDassert( slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
- HDassert( slist_entry_ptr->addr == addr );
-
- if ( H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr))
- != slist_entry_ptr ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
- "Can't delete entry from dirty entry slist.")
- }
-
- slist_entry_ptr->magic = 0;
+ if(NULL != (slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr))))
slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
-
- aux_ptr->d_slist_len -= 1;
-
- HDassert( aux_ptr->d_slist_len >= 0 );
- }
- } else if ( was_dirty ) {
-
- if ( H5SL_search(aux_ptr->c_slist_ptr, (void *)(&addr)) == NULL ) {
-
+ } /* end if */
+ else if(was_dirty) {
+ if(NULL == H5SL_search(aux_ptr->c_slist_ptr, (void *)(&addr))) {
/* insert the address of the entry in the clean entry list. */
+ if(NULL == (slist_entry_ptr = H5FL_MALLOC(H5AC_slist_entry_t)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "Can't allocate clean slist entry.")
+ slist_entry_ptr->addr = addr;
- if ( NULL == (slist_entry_ptr = H5FL_CALLOC(H5AC_slist_entry_t)) ) {
-
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
- "Can't allocate clean slist entry .")
- }
-
- slist_entry_ptr->magic = H5AC__H5AC_SLIST_ENTRY_T_MAGIC;
- slist_entry_ptr->addr = addr;
-
- if ( H5SL_insert(aux_ptr->c_slist_ptr, slist_entry_ptr,
- &(slist_entry_ptr->addr)) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, \
- "can't insert entry into clean entry slist.")
- }
-
- aux_ptr->c_slist_len += 1;
- }
- }
+ if(H5SL_insert(aux_ptr->c_slist_ptr, slist_entry_ptr, &(slist_entry_ptr->addr)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "can't insert entry into clean entry slist.")
+ } /* end if */
+ } /* end else-if */
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
-} /* H5AC_log_flushed_entry() */
+} /* H5AC__log_flushed_entry() */
#endif /* H5_HAVE_PARALLEL */
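
The bookkeeping in H5AC__log_flushed_entry() hinges on two flags: whether the flush was clear-only and whether the entry was dirty beforehand. A condensed restatement of that decision (hypothetical enum and helper, for illustration only):

#include <stdbool.h>

/* What the process-0 bookkeeping does for a just-flushed entry */
typedef enum {
    ACTION_REMOVE_FROM_BOTH_LISTS,   /* clear-only flush: forget the entry */
    ACTION_ADD_TO_CLEAN_LIST,        /* real flush of a dirty entry        */
    ACTION_NONE                      /* entry was already clean            */
} flush_action_t;

static flush_action_t
flushed_entry_action(bool cleared, bool was_dirty)
{
    if(cleared)
        return ACTION_REMOVE_FROM_BOTH_LISTS;
    if(was_dirty)
        return ACTION_ADD_TO_CLEAN_LIST;
    return ACTION_NONE;
}
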
/*-------------------------------------------------------------------------
*
- * Function: H5AC_log_inserted_entry()
+ * Function: H5AC__log_inserted_entry()
*
* Purpose: Update the dirty_bytes count for a newly inserted entry.
*
@@ -3685,48 +3073,44 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_log_inserted_entry(H5AC_t * cache_ptr,
- H5AC_info_t * entry_ptr)
+H5AC__log_inserted_entry(const H5AC_info_t *entry_ptr)
{
+ H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(entry_ptr);
+ cache_ptr = entry_ptr->cache_ptr;
HDassert(cache_ptr != NULL);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
HDassert(aux_ptr != NULL);
HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
- HDassert( entry_ptr != NULL );
-
if(aux_ptr->mpi_rank == 0) {
- H5AC_slist_entry_t * slist_entry_ptr;
+ H5AC_slist_entry_t *slist_entry_ptr;
HDassert(aux_ptr->d_slist_ptr != NULL);
HDassert(aux_ptr->c_slist_ptr != NULL);
- if(NULL != H5SL_search(aux_ptr->d_slist_ptr, (void *)(&entry_ptr->addr)))
+ /* Entry to insert should not be in dirty list currently */
+ if(NULL != H5SL_search(aux_ptr->d_slist_ptr, (const void *)(&entry_ptr->addr)))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Inserted entry already in dirty slist.")
/* insert the address of the entry in the dirty entry list, and
* add its size to the dirty_bytes count.
*/
- if(NULL == (slist_entry_ptr = H5FL_CALLOC(H5AC_slist_entry_t)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "Can't allocate dirty slist entry .")
-
- slist_entry_ptr->magic = H5AC__H5AC_SLIST_ENTRY_T_MAGIC;
+ if(NULL == (slist_entry_ptr = H5FL_MALLOC(H5AC_slist_entry_t)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "Can't allocate dirty slist entry.")
slist_entry_ptr->addr = entry_ptr->addr;
-
- if(H5SL_insert(aux_ptr->d_slist_ptr, slist_entry_ptr, &(slist_entry_ptr->addr)) < 0 )
+ if(H5SL_insert(aux_ptr->d_slist_ptr, slist_entry_ptr, &(slist_entry_ptr->addr)) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "can't insert entry into dirty entry slist.")
- aux_ptr->d_slist_len += 1;
-
- if(NULL != H5SL_search(aux_ptr->c_slist_ptr, (void *)(&entry_ptr->addr)))
+ /* Entry to insert should not be in clean list either */
+ if(NULL != H5SL_search(aux_ptr->c_slist_ptr, (const void *)(&entry_ptr->addr)))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Inserted entry in clean slist.")
} /* end if */
@@ -3739,13 +3123,13 @@ H5AC_log_inserted_entry(H5AC_t * cache_ptr,
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_log_inserted_entry() */
+} /* H5AC__log_inserted_entry() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
*
- * Function: H5AC_log_moved_entry()
+ * Function: H5AC__log_moved_entry()
*
* Purpose: Update the dirty_bytes count for a moved entry.
*
@@ -3756,8 +3140,8 @@ done:
* moving in a collective operation and immediately after
* unprotecting the target entry.
*
- * This function uses this invarient, and will cause arcane
- * failures if it is not met. If maintaining this invarient
+ * This function uses this invariant, and will cause arcane
+ * failures if it is not met. If maintaining this invariant
* becomes impossible, we will have to rework this function
* extensively, and likely include a bit of IPC for
* synchronization. A better option might be to subsume
@@ -3791,100 +3175,52 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_log_moved_entry(const H5F_t *f,
- haddr_t old_addr,
- haddr_t new_addr)
+H5AC__log_moved_entry(const H5F_t *f, haddr_t old_addr, haddr_t new_addr)
{
- H5AC_t * cache_ptr;
+ H5AC_t * cache_ptr;
+ H5AC_aux_t * aux_ptr;
hbool_t entry_in_cache;
hbool_t entry_dirty;
size_t entry_size;
- H5AC_aux_t * aux_ptr = NULL;
- H5AC_slist_entry_t * slist_entry_ptr = NULL;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( f );
- HDassert( f->shared );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
cache_ptr = (H5AC_t *)f->shared->cache;
-
- HDassert( cache_ptr );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
-
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
/* get entry status, size, etc here */
- if ( H5C_get_entry_status(f, old_addr, &entry_size, &entry_in_cache,
- &entry_dirty, NULL, NULL, NULL, NULL) < 0 ) {
-
+ if(H5C_get_entry_status(f, old_addr, &entry_size, &entry_in_cache,
+ &entry_dirty, NULL, NULL, NULL, NULL) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get entry status.")
-
- } else if ( ! entry_in_cache ) {
-
- HDassert( entry_in_cache );
+ if(!entry_in_cache)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry not in cache.")
- }
- if ( aux_ptr->mpi_rank == 0 ) {
+ if(aux_ptr->mpi_rank == 0) {
+ H5AC_slist_entry_t * slist_entry_ptr;
- HDassert( aux_ptr->d_slist_ptr != NULL );
- HDassert( aux_ptr->c_slist_ptr != NULL );
+ HDassert(aux_ptr->d_slist_ptr != NULL);
+ HDassert(aux_ptr->c_slist_ptr != NULL);
/* if the entry appears in the cleaned entry slist, under its old
* address, remove it.
*/
- if ( (slist_entry_ptr = (H5AC_slist_entry_t *)
- H5SL_search(aux_ptr->c_slist_ptr, (void *)(&old_addr))) != NULL ) {
-
- HDassert( slist_entry_ptr->magic ==
- H5AC__H5AC_SLIST_ENTRY_T_MAGIC );
- HDassert( slist_entry_ptr->addr == old_addr );
-
- if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&old_addr))
- != slist_entry_ptr ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
- "Can't delete entry from cleaned entry slist.")
- }
-
- slist_entry_ptr->magic = 0;
+ if(NULL != (slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&old_addr))))
slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
- aux_ptr->c_slist_len -= 1;
-
- HDassert( aux_ptr->c_slist_len >= 0 );
- }
-
/* if the entry appears in the dirtied entry slist under its old
* address, remove it, but don't free it. Set addr to new_addr.
*/
- if ( (slist_entry_ptr = (H5AC_slist_entry_t *)
- H5SL_search(aux_ptr->d_slist_ptr, (void *)(&old_addr))) != NULL ) {
-
- HDassert( slist_entry_ptr->magic ==
- H5AC__H5AC_SLIST_ENTRY_T_MAGIC );
- HDassert( slist_entry_ptr->addr == old_addr );
-
- if ( H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&old_addr))
- != slist_entry_ptr ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
- "Can't delete entry from dirty entry slist.")
- }
-
+ if(NULL != (slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&old_addr))))
slist_entry_ptr->addr = new_addr;
-
- aux_ptr->d_slist_len -= 1;
-
- HDassert( aux_ptr->d_slist_len >= 0 );
-
- } else {
-
+ else {
/* otherwise, allocate a new entry that is ready
* for insertion, and increment dirty_bytes.
*
@@ -3892,17 +3228,10 @@ H5AC_log_moved_entry(const H5F_t *f,
* list under its old address implies that it must have
* been clean to start with.
*/
-
- HDassert( !entry_dirty );
-
- if ( NULL == (slist_entry_ptr = H5FL_CALLOC(H5AC_slist_entry_t)) ) {
-
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
- "Can't allocate dirty slist entry .")
- }
-
- slist_entry_ptr->magic = H5AC__H5AC_SLIST_ENTRY_T_MAGIC;
- slist_entry_ptr->addr = new_addr;
+ HDassert(!entry_dirty);
+ if(NULL == (slist_entry_ptr = H5FL_MALLOC(H5AC_slist_entry_t)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "Can't allocate dirty slist entry.")
+ slist_entry_ptr->addr = new_addr;
aux_ptr->dirty_bytes += entry_size;
@@ -3910,43 +3239,29 @@ H5AC_log_moved_entry(const H5F_t *f,
aux_ptr->move_dirty_bytes += entry_size;
aux_ptr->move_dirty_bytes_updates += 1;
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
- }
-
- /* verify that there is no entry at new_addr in the dirty slist */
- if ( H5SL_search(aux_ptr->d_slist_ptr, (void *)(&new_addr)) != NULL ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "dirty slist already contains entry at new_addr.")
- }
+ } /* end else */
/* insert / reinsert the entry in the dirty slist */
- if ( H5SL_insert(aux_ptr->d_slist_ptr, slist_entry_ptr,
- &(slist_entry_ptr->addr)) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, \
- "can't insert entry into dirty entry slist.")
- }
-
- aux_ptr->d_slist_len += 1;
-
- } else if ( ! entry_dirty ) {
-
+ if(H5SL_insert(aux_ptr->d_slist_ptr, slist_entry_ptr, &(slist_entry_ptr->addr)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "can't insert entry into dirty entry slist.")
+ } /* end if */
+ else if(!entry_dirty) {
aux_ptr->dirty_bytes += entry_size;
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
aux_ptr->move_dirty_bytes += entry_size;
aux_ptr->move_dirty_bytes_updates += 1;
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
- }
+ } /* end else-if */
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_log_moved_entry() */
+} /* H5AC__log_moved_entry() */
#endif /* H5_HAVE_PARALLEL */
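On process 0, H5AC__log_moved_entry() above reduces to set maintenance keyed by file address: the old address is dropped from the cleaned-entry list, and the dirty-list record is either re-keyed to the new address or created fresh, in which case the move itself counts as newly dirtied bytes. The sketch below mirrors that shape with a flat array standing in for the H5SL_t skip lists; all names are invented and this is not HDF5 code.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_ENTRIES 64
typedef uint64_t addr_t;            /* stand-in for haddr_t */

/* Toy replacement for the H5SL_t skip lists keyed by entry address. */
struct addr_list {
    addr_t addr[MAX_ENTRIES];
    int    count;
};

static int list_remove(struct addr_list *l, addr_t a)
{
    int i;

    for (i = 0; i < l->count; i++)
        if (l->addr[i] == a) {
            l->addr[i] = l->addr[--l->count];   /* swap-remove */
            return 1;
        }
    return 0;
}

static void list_insert(struct addr_list *l, addr_t a)
{
    l->addr[l->count++] = a;
}

/* Mirrors the rank-0 branch of H5AC__log_moved_entry(). */
static void log_moved(struct addr_list *clean, struct addr_list *dirty,
    addr_t old_addr, addr_t new_addr, size_t entry_size, size_t *dirty_bytes)
{
    /* If the entry sits on the cleaned list under its old address, drop it. */
    (void)list_remove(clean, old_addr);

    /* If it is already on the dirty list, just re-key it; otherwise the
     * move itself dirties the entry, so add a record and count the bytes.
     */
    if (list_remove(dirty, old_addr))
        list_insert(dirty, new_addr);
    else {
        list_insert(dirty, new_addr);
        *dirty_bytes += entry_size;
    }
}

int main(void)
{
    struct addr_list clean = { {0}, 0 }, dirty = { {0}, 0 };
    size_t dirty_bytes = 0;

    list_insert(&clean, 0x1000);    /* entry known clean at address 0x1000 */
    log_moved(&clean, &dirty, 0x1000, 0x2000, 512, &dirty_bytes);
    printf("dirty entries: %d, dirty bytes: %zu\n", dirty.count, dirty_bytes);
    return 0;
}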
/*-------------------------------------------------------------------------
- * Function: H5AC_propagate_and_apply_candidate_list
+ * Function: H5AC__propagate_and_apply_candidate_list
*
* Purpose: Prior to the addition of support for multiple metadata
* write strategies, in PHDF5, only the metadata cache with
@@ -4042,42 +3357,41 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_propagate_and_apply_candidate_list(H5F_t * f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr)
+H5AC__propagate_and_apply_candidate_list(H5F_t *f, hid_t dxpl_id)
{
- int mpi_code;
- int num_candidates = 0;
- haddr_t * candidates_list_ptr = NULL;
+ H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
+ haddr_t * candidates_list_ptr = NULL;
+ int mpi_result;
+ int num_candidates = 0;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(f != NULL);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
/* to prevent "messages from the future" we must synchronize all
* processes before we write any entries.
*/
- if(MPI_SUCCESS != (mpi_code = MPI_Barrier(aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed 1", mpi_code)
+ if(MPI_SUCCESS != (mpi_result = MPI_Barrier(aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_result)
if(aux_ptr->mpi_rank == 0) {
- if(H5AC_broadcast_candidate_list(cache_ptr, &num_candidates, &candidates_list_ptr) < 0)
+ if(H5AC__broadcast_candidate_list(cache_ptr, &num_candidates, &candidates_list_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't broadcast candidate slist.")
- HDassert( aux_ptr->candidate_slist_len == 0 );
+ HDassert(H5SL_count(aux_ptr->candidate_slist_ptr) == 0);
} /* end if */
else {
- if(H5AC_receive_candidate_list(cache_ptr, &num_candidates, &candidates_list_ptr) < 0)
+ if(H5AC__receive_candidate_list(cache_ptr, &num_candidates, &candidates_list_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't receive candidate broadcast.")
} /* end else */
@@ -4089,55 +3403,59 @@ H5AC_propagate_and_apply_candidate_list(H5F_t * f,
* distributing the writes across the processes.
*/
+ /* Enable writes during this operation */
aux_ptr->write_permitted = TRUE;
- result = H5C_apply_candidate_list(f,
- dxpl_id,
- dxpl_id,
- cache_ptr,
- num_candidates,
- candidates_list_ptr,
- aux_ptr->mpi_rank,
- aux_ptr->mpi_size);
+ /* Apply the candidate list */
+ result = H5C_apply_candidate_list(f, dxpl_id, cache_ptr, num_candidates,
+ candidates_list_ptr, aux_ptr->mpi_rank, aux_ptr->mpi_size);
+ /* Disable writes again */
aux_ptr->write_permitted = FALSE;
+ /* Check for error on the write operation */
if(result < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't apply candidate list.")
- if(aux_ptr->write_done != NULL)
+ /* this code exists primarily for the test bed -- it allows us to
+ * enforce posix semantics on the server that pretends to be a
+ * file system in our parallel tests.
+ */
+ if(aux_ptr->write_done)
(aux_ptr->write_done)();
/* to prevent "messages from the past" we must synchronize all
* processes again before we go on.
*/
- if(MPI_SUCCESS != (mpi_code = MPI_Barrier(aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed 2", mpi_code)
+ if(MPI_SUCCESS != (mpi_result = MPI_Barrier(aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_result)
- if(aux_ptr->mpi_rank == 0) {
- if(H5AC_tidy_cache_0_lists(cache_ptr, num_candidates, candidates_list_ptr) < 0)
+ /* if this is process zero, tidy up the dirtied,
+ * and flushed and still clean lists.
+ */
+ if(aux_ptr->mpi_rank == 0)
+ if(H5AC__tidy_cache_0_lists(cache_ptr, num_candidates, candidates_list_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't tidy up process 0 lists.")
- } /* end if */
} /* end if */
/* if it is defined, call the sync point done callback. Note
* that this callback is defined purely for testing purposes,
* and should be undefined under normal operating circumstances.
*/
- if(aux_ptr->sync_point_done != NULL)
+ if(aux_ptr->sync_point_done)
(aux_ptr->sync_point_done)(num_candidates, candidates_list_ptr);
done:
- if(candidates_list_ptr != NULL)
+ if(candidates_list_ptr)
candidates_list_ptr = (haddr_t *)H5MM_xfree((void *)candidates_list_ptr);
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_propagate_and_apply_candidate_list() */
+} /* H5AC__propagate_and_apply_candidate_list() */
#endif /* H5_HAVE_PARALLEL */
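H5AC__propagate_and_apply_candidate_list() above always follows the same shape: an opening barrier (no "messages from the future"), a broadcast of the candidate addresses from rank 0, a write phase in which each rank handles its share of the list, and a closing barrier (no "messages from the past"). The compilable MPI sketch below shows only that shape; the round-robin split is an illustration, since the real partitioning is computed inside H5C_apply_candidate_list(), which is not part of this diff.

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

typedef uint64_t addr_t;            /* stand-in for haddr_t */

int main(int argc, char **argv)
{
    int      rank, size, num = 0, i;
    addr_t  *cand = NULL;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* Opening barrier: no rank may start writing before every rank has
     * entered the sync point ("messages from the future").
     */
    MPI_Barrier(MPI_COMM_WORLD);

    /* Rank 0 owns the candidate list; all ranks learn its length first. */
    if (rank == 0)
        num = 8;
    MPI_Bcast(&num, 1, MPI_INT, 0, MPI_COMM_WORLD);

    /* Then the address payload itself is broadcast as raw bytes. */
    if (NULL == (cand = malloc((size_t)num * sizeof(addr_t))))
        MPI_Abort(MPI_COMM_WORLD, 1);
    if (rank == 0)
        for (i = 0; i < num; i++)
            cand[i] = 0x1000u + 0x200u * (unsigned)i;
    MPI_Bcast(cand, num * (int)sizeof(addr_t), MPI_BYTE, 0, MPI_COMM_WORLD);

    /* Write phase: each rank flushes "its" candidates (round-robin here). */
    for (i = 0; i < num; i++)
        if (i % size == rank)
            printf("rank %d writes entry at 0x%llx\n", rank,
                   (unsigned long long)cand[i]);

    /* Closing barrier: no rank may leave until all writes have completed
     * ("messages from the past").
     */
    MPI_Barrier(MPI_COMM_WORLD);

    free(cand);
    MPI_Finalize();
    return 0;
}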
/*-------------------------------------------------------------------------
- * Function: H5AC_propagate_flushed_and_still_clean_entries_list
+ * Function: H5AC__propagate_flushed_and_still_clean_entries_list
*
* Purpose: In PHDF5, if the process 0 only metadata write strategy
* is selected, only the metadata cache with mpi rank 0 is
@@ -4209,47 +3527,48 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_propagate_flushed_and_still_clean_entries_list(H5F_t * f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr)
+H5AC__propagate_flushed_and_still_clean_entries_list(H5F_t *f, hid_t dxpl_id)
{
+ H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(f != NULL);
+ cache_ptr = f->shared->cache;
HDassert(cache_ptr != NULL);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
HDassert(aux_ptr != NULL);
HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
- HDassert(aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
+ HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
if(aux_ptr->mpi_rank == 0) {
- if(H5AC_broadcast_clean_list(cache_ptr) < 0)
+ if(H5AC__broadcast_clean_list(cache_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't broadcast clean slist.")
- HDassert( aux_ptr->c_slist_len == 0 );
+ HDassert(H5SL_count(aux_ptr->c_slist_ptr) == 0);
} /* end if */
else {
- if(H5AC_receive_and_apply_clean_list(f, dxpl_id, H5AC_dxpl_id, cache_ptr) < 0)
+ if(H5AC__receive_and_apply_clean_list(f, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't receive and/or process clean slist broadcast.")
} /* end else */
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_propagate_flushed_and_still_clean_entries_list() */
+} /* H5AC__propagate_flushed_and_still_clean_entries_list() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
*
- * Function: H5AC_receive_and_apply_clean_list()
+ * Function: H5AC__receive_haddr_list()
*
- * Purpose: Receive the list of cleaned entries from process 0,
- * and mark the specified entries as clean.
+ * Purpose: Receive the list of entry addresses from process 0,
+ * and return it in a buffer pointed to by *haddr_buf_ptr_ptr.
+ * Note that the caller must free this buffer if it is
+ * returned.
*
* This function must only be called by the process with
* MPI_rank greater than 0.
@@ -4258,104 +3577,132 @@ done:
*
* Return: Non-negative on success/Negative on failure.
*
- * Programmer: John Mainzer, 7/4/05
+ * Programmer: Quincey Koziol, 6/11/2015
*
*-------------------------------------------------------------------------
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_receive_and_apply_clean_list(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- H5AC_t * cache_ptr)
+H5AC__receive_haddr_list(MPI_Comm mpi_comm, int *num_entries_ptr,
+ haddr_t **haddr_buf_ptr_ptr)
{
- H5AC_aux_t * aux_ptr;
haddr_t * haddr_buf_ptr = NULL;
- MPI_Offset * MPI_Offset_buf_ptr = NULL;
int mpi_result;
- int num_entries = 0;
+ int num_entries;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( f != NULL );
- HDassert( f->shared->cache == cache_ptr );
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
-
- aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+ FUNC_ENTER_STATIC
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->mpi_rank != 0 );
+ /* Sanity checks */
+ HDassert(num_entries_ptr != NULL);
+ HDassert(*num_entries_ptr == 0);
+ HDassert(haddr_buf_ptr_ptr != NULL);
+ HDassert(*haddr_buf_ptr_ptr == NULL);
/* First receive the number of entries in the list so that we
* can set up a buffer to receive them. If there aren't
* any, we are done.
*/
- if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 1", mpi_result)
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
if(num_entries > 0) {
size_t buf_size;
- int i;
/* allocate buffers to store the list of entry base addresses in */
- buf_size = sizeof(MPI_Offset) * (size_t)num_entries;
- if(NULL == (MPI_Offset_buf_ptr = (MPI_Offset *)H5MM_malloc(buf_size)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for receive buffer")
- if(NULL == (haddr_buf_ptr = (haddr_t *)H5MM_malloc(sizeof(haddr_t) * (size_t)num_entries)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for haddr buffer")
-
- /* Now receive the list of cleaned entries
- *
- * The peculiar structure of the following call to MPI_Bcast is
- * due to MPI's (?) failure to believe in the MPI_Offset type.
- * Thus the element type is MPI_BYTE, with size equal to the
- * buf_size computed above.
- */
- if(MPI_SUCCESS != (mpi_result = MPI_Bcast((void *)MPI_Offset_buf_ptr, (int)buf_size, MPI_BYTE, 0, aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 2", mpi_result)
+ buf_size = sizeof(haddr_t) * (size_t)num_entries;
+ if(NULL == (haddr_buf_ptr = (haddr_t *)H5MM_malloc(buf_size)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for haddr buffer")
+
+ /* Now receive the list of candidate entries */
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast((void *)haddr_buf_ptr, (int)buf_size, MPI_BYTE, 0, mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
+ } /* end if */
- /* translate the MPI_Offsets to haddr_t */
- i = 0;
- while(i < num_entries) {
- haddr_buf_ptr[i] = H5FD_mpi_MPIOff_to_haddr(MPI_Offset_buf_ptr[i]);
+ /* finally, pass the number of entries and the buffer pointer
+ * back to the caller.
+ */
+ *num_entries_ptr = num_entries;
+ *haddr_buf_ptr_ptr = haddr_buf_ptr;
- if(haddr_buf_ptr[i] == HADDR_UNDEF)
- HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't convert MPI off to haddr")
+done:
+ if(ret_value < 0)
+ if(haddr_buf_ptr)
+ haddr_buf_ptr = (haddr_t *)H5MM_xfree((void *)haddr_buf_ptr);
- i++;
- } /* end while */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC__receive_haddr_list() */
+#endif /* H5_HAVE_PARALLEL */
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC__receive_and_apply_clean_list()
+ *
+ * Purpose: Receive the list of cleaned entries from process 0,
+ * and mark the specified entries as clean.
+ *
+ * This function must only be called by the process with
+ * MPI_rank greater than 0.
+ *
+ * Return SUCCEED on success, and FAIL on failure.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 7/4/05
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5_HAVE_PARALLEL
+static herr_t
+H5AC__receive_and_apply_clean_list(H5F_t *f, hid_t dxpl_id)
+{
+ H5AC_t * cache_ptr;
+ H5AC_aux_t * aux_ptr;
+ haddr_t * haddr_buf_ptr = NULL;
+ int num_entries = 0;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity check */
+ HDassert(f != NULL);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->mpi_rank != 0);
+
+ /* Retrieve the clean list from process 0 */
+ if(H5AC__receive_haddr_list(aux_ptr->mpi_comm, &num_entries, &haddr_buf_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't receive clean list")
+
+ if(num_entries > 0)
/* mark the indicated entries as clean */
- if(H5C_mark_entries_as_clean(f, primary_dxpl_id, secondary_dxpl_id,
- (int32_t)num_entries, &(haddr_buf_ptr[0])) < 0)
+ if(H5C_mark_entries_as_clean(f, dxpl_id, (int32_t)num_entries, haddr_buf_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't mark entries clean.")
- } /* end if */
/* if it is defined, call the sync point done callback. Note
* that this callback is defined purely for testing purposes,
* and should be undefined under normal operating circumstances.
*/
- if(aux_ptr->sync_point_done != NULL)
+ if(aux_ptr->sync_point_done)
(aux_ptr->sync_point_done)(num_entries, haddr_buf_ptr);
done:
- if(MPI_Offset_buf_ptr != NULL)
- MPI_Offset_buf_ptr = (MPI_Offset *)H5MM_xfree((void *)MPI_Offset_buf_ptr);
- if(haddr_buf_ptr != NULL)
+ if(haddr_buf_ptr)
haddr_buf_ptr = (haddr_t *)H5MM_xfree((void *)haddr_buf_ptr);
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_receive_and_apply_clean_list() */
+} /* H5AC__receive_and_apply_clean_list() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
*
- * Function: H5AC_receive_candidate_list()
+ * Function: H5AC__receive_candidate_list()
*
* Purpose: Receive the list of candidate entries from process 0,
* and return it in a buffer pointed to by *haddr_buf_ptr_ptr.
@@ -4375,105 +3722,39 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_receive_candidate_list(H5AC_t * cache_ptr,
- int * num_entries_ptr,
- haddr_t ** haddr_buf_ptr_ptr)
+H5AC__receive_candidate_list(const H5AC_t *cache_ptr, int *num_entries_ptr,
+ haddr_t **haddr_buf_ptr_ptr)
{
- hbool_t success = FALSE;
H5AC_aux_t * aux_ptr;
- haddr_t * haddr_buf_ptr = NULL;
- MPI_Offset * MPI_Offset_buf_ptr = NULL;
- int mpi_result;
- int num_entries;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->mpi_rank != 0);
+ HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
+ HDassert(num_entries_ptr != NULL);
+ HDassert(*num_entries_ptr == 0);
+ HDassert(haddr_buf_ptr_ptr != NULL);
+ HDassert(*haddr_buf_ptr_ptr == NULL);
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->mpi_rank != 0 );
- HDassert( aux_ptr-> metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
-
- HDassert( num_entries_ptr != NULL );
- HDassert( *num_entries_ptr == 0 );
-
- HDassert( haddr_buf_ptr_ptr != NULL );
- HDassert( *haddr_buf_ptr_ptr == NULL );
-
-
- /* First receive the number of entries in the list so that we
- * can set up a buffer to receive them. If there aren't
- * any, we are done.
- */
- if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 1", mpi_result)
-
- if(num_entries > 0) {
- size_t buf_size;
- int i;
-
- /* allocate buffers to store the list of entry base addresses in */
- buf_size = sizeof(MPI_Offset) * (size_t)num_entries;
-
- if(NULL == (MPI_Offset_buf_ptr = (MPI_Offset *)H5MM_malloc(buf_size)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for receive buffer")
- if(NULL == (haddr_buf_ptr = (haddr_t *)H5MM_malloc(sizeof(haddr_t) * (size_t)num_entries)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for haddr buffer")
-
- /* Now receive the list of candidate entries
- *
- * The peculiar structure of the following call to MPI_Bcast is
- * due to MPI's (?) failure to believe in the MPI_Offset type.
- * Thus the element type is MPI_BYTE, with size equal to the
- * buf_size computed above.
- */
- if(MPI_SUCCESS != (mpi_result = MPI_Bcast((void *)MPI_Offset_buf_ptr, (int)buf_size, MPI_BYTE, 0, aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 2", mpi_result)
-
- /* translate the MPI_Offsets to haddr_t */
- i = 0;
- while(i < num_entries) {
- haddr_buf_ptr[i] = H5FD_mpi_MPIOff_to_haddr(MPI_Offset_buf_ptr[i]);
-
- if(haddr_buf_ptr[i] == HADDR_UNDEF)
- HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't convert MPI off to haddr")
-
- i++;
- } /* end while */
- } /* end if */
-
- success = TRUE;
+ /* Retrieve the candidate list from process 0 */
+ if(H5AC__receive_haddr_list(aux_ptr->mpi_comm, num_entries_ptr, haddr_buf_ptr_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't receive candidate list")
done:
- if(MPI_Offset_buf_ptr != NULL)
- MPI_Offset_buf_ptr = (MPI_Offset *)H5MM_xfree((void *)MPI_Offset_buf_ptr);
-
- if(success) {
- /* finally, pass the number of entries and the buffer pointer
- * back to the caller. Do this so that we can use the same code
- * to apply the candidate list to all the processes.
- */
- *num_entries_ptr = num_entries;
- *haddr_buf_ptr_ptr = haddr_buf_ptr;
- } /* end if */
- else {
- if(haddr_buf_ptr != NULL)
- haddr_buf_ptr = (haddr_t *)H5MM_xfree((void *)haddr_buf_ptr);
- } /* end else */
-
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_receive_candidate_list() */
+} /* H5AC__receive_candidate_list() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
- * Function: H5AC_rsp__dist_md_write__flush
+ * Function: H5AC__rsp__dist_md_write__flush
*
* Purpose: Routine for handling the details of running a sync point
* that is triggered by a flush -- which in turn must have been
@@ -4522,31 +3803,27 @@ done:
*-------------------------------------------------------------------------
*/
#ifdef H5_HAVE_PARALLEL
-herr_t
-H5AC_rsp__dist_md_write__flush(H5F_t *f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr)
+static herr_t
+H5AC__rsp__dist_md_write__flush(H5F_t *f, hid_t dxpl_id)
{
- int mpi_code;
- int num_entries = 0;
- haddr_t * haddr_buf_ptr = NULL;
+ H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
+ haddr_t * haddr_buf_ptr = NULL;
+ int mpi_result;
+ int num_entries = 0;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( f != NULL );
- HDassert( f->shared->cache == cache_ptr );
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(f != NULL);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
/* first construct the candidate list -- initially, this will be in the
* form of a skip list. We will convert it later.
@@ -4554,33 +3831,30 @@ H5AC_rsp__dist_md_write__flush(H5F_t *f,
if(H5C_construct_candidate_list__clean_cache(cache_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't construct candidate list.")
- if(aux_ptr->candidate_slist_len > 0) {
+ if(H5SL_count(aux_ptr->candidate_slist_ptr) > 0) {
herr_t result;
/* convert the candidate list into the format we
* are used to receiving from process 0.
*/
- if(H5AC_copy_candidate_list_to_buffer(cache_ptr, &num_entries, &haddr_buf_ptr, NULL, NULL) < 0)
+ if(H5AC__copy_candidate_list_to_buffer(cache_ptr, &num_entries, &haddr_buf_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't construct candidate buffer.")
/* initial sync point barrier */
- if(MPI_SUCCESS != (mpi_code = MPI_Barrier(aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed 1", mpi_code)
+ if(MPI_SUCCESS != (mpi_result = MPI_Barrier(aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_result)
- /* apply the candidate list */
+ /* Enable writes during this operation */
aux_ptr->write_permitted = TRUE;
- result = H5C_apply_candidate_list(f,
- dxpl_id,
- dxpl_id,
- cache_ptr,
- num_entries,
- haddr_buf_ptr,
- aux_ptr->mpi_rank,
- aux_ptr->mpi_size);
+ /* Apply the candidate list */
+ result = H5C_apply_candidate_list(f, dxpl_id, cache_ptr, num_entries,
+ haddr_buf_ptr, aux_ptr->mpi_rank, aux_ptr->mpi_size);
+ /* Disable writes again */
aux_ptr->write_permitted = FALSE;
+ /* Check for error on the write operation */
if(result < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't apply candidate list.")
@@ -4588,40 +3862,39 @@ H5AC_rsp__dist_md_write__flush(H5F_t *f,
* enforce posix semantics on the server that pretends to be a
* file system in our parallel tests.
*/
- if(aux_ptr->write_done != NULL)
+ if(aux_ptr->write_done)
(aux_ptr->write_done)();
/* final sync point barrier */
- if(MPI_SUCCESS != (mpi_code = MPI_Barrier(aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed 1", mpi_code)
+ if(MPI_SUCCESS != (mpi_result = MPI_Barrier(aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_result)
/* if this is process zero, tidy up the dirtied,
* and flushed and still clean lists.
*/
- if(aux_ptr->mpi_rank == 0) {
- if(H5AC_tidy_cache_0_lists(cache_ptr, num_entries, haddr_buf_ptr) < 0)
+ if(aux_ptr->mpi_rank == 0)
+ if(H5AC__tidy_cache_0_lists(cache_ptr, num_entries, haddr_buf_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't tidy up process 0 lists.")
- } /* end if */
} /* end if */
/* if it is defined, call the sync point done callback. Note
* that this callback is defined purely for testing purposes,
* and should be undefined under normal operating circumstances.
*/
- if(aux_ptr->sync_point_done != NULL)
+ if(aux_ptr->sync_point_done)
(aux_ptr->sync_point_done)(num_entries, haddr_buf_ptr);
done:
- if(haddr_buf_ptr != NULL)
+ if(haddr_buf_ptr)
haddr_buf_ptr = (haddr_t *)H5MM_xfree((void *)haddr_buf_ptr);
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_rsp__dist_md_write__flush() */
+} /* H5AC__rsp__dist_md_write__flush() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
- * Function: H5AC_rsp__dist_md_write__flush_to_min_clean
+ * Function: H5AC__rsp__dist_md_write__flush_to_min_clean
*
* Purpose: Routine for handling the details of running a sync point
* triggered by the accumulation of dirty metadata (as
@@ -4675,29 +3948,25 @@ done:
*-------------------------------------------------------------------------
*/
#ifdef H5_HAVE_PARALLEL
-herr_t
-H5AC_rsp__dist_md_write__flush_to_min_clean(H5F_t *f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr)
+static herr_t
+H5AC__rsp__dist_md_write__flush_to_min_clean(H5F_t *f, hid_t dxpl_id)
{
- hbool_t evictions_enabled;
+ H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
+ hbool_t evictions_enabled;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( f != NULL );
- HDassert( f->shared->cache == cache_ptr );
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(f != NULL);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
/* Query if evictions are allowed */
if(H5C_get_evictions_enabled((const H5C_t *)cache_ptr, &evictions_enabled) < 0)
@@ -4705,24 +3974,23 @@ H5AC_rsp__dist_md_write__flush_to_min_clean(H5F_t *f,
if(evictions_enabled) {
/* construct candidate list -- process 0 only */
- if(aux_ptr->mpi_rank == 0) {
- if(H5AC_construct_candidate_list(cache_ptr, aux_ptr, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
+ if(aux_ptr->mpi_rank == 0)
+ if(H5AC__construct_candidate_list(cache_ptr, aux_ptr, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't construct candidate list.")
- } /* mpi rank == 0 */
/* propagate and apply candidate list -- all processes */
- if(H5AC_propagate_and_apply_candidate_list(f, dxpl_id, cache_ptr) < 0)
+ if(H5AC__propagate_and_apply_candidate_list(f, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't propagate and apply candidate list.")
} /* evictions enabled */
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_rsp__dist_md_write__flush_to_min_clean() */
+} /* H5AC__rsp__dist_md_write__flush_to_min_clean() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
- * Function: H5AC_rsp__p0_only__flush
+ * Function: H5AC__rsp__p0_only__flush
*
* Purpose: Routine for handling the details of running a sync point
* that is triggered a flush -- which in turn must have been
@@ -4758,67 +4026,70 @@ done:
*-------------------------------------------------------------------------
*/
#ifdef H5_HAVE_PARALLEL
-herr_t
-H5AC_rsp__p0_only__flush(H5F_t *f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr)
+static herr_t
+H5AC__rsp__p0_only__flush(H5F_t *f, hid_t dxpl_id)
{
- int mpi_code;
+ H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
+ int mpi_result;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( f != NULL );
- HDassert( f->shared->cache == cache_ptr );
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(f != NULL);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY );
-
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
/* to prevent "messages from the future" we must
* synchronize all processes before we start the flush.
* Hence the following barrier.
*/
- if(MPI_SUCCESS != (mpi_code = MPI_Barrier(aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed 1", mpi_code)
+ if(MPI_SUCCESS != (mpi_result = MPI_Barrier(aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_result)
/* Flush data to disk, from rank 0 process */
if(aux_ptr->mpi_rank == 0) {
herr_t result;
+ /* Enable writes during this operation */
aux_ptr->write_permitted = TRUE;
- result = H5C_flush_cache(f, dxpl_id, dxpl_id, H5AC__NO_FLAGS_SET);
+ /* Flush the cache */
+ result = H5C_flush_cache(f, dxpl_id, H5AC__NO_FLAGS_SET);
+ /* Disable writes again */
aux_ptr->write_permitted = FALSE;
+ /* Check for error on the write operation */
if(result < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush.")
- if(aux_ptr->write_done != NULL)
+ /* this code exists primarily for the test bed -- it allows us to
+ * enforce posix semantics on the server that pretends to be a
+ * file system in our parallel tests.
+ */
+ if(aux_ptr->write_done)
(aux_ptr->write_done)();
} /* end if */
/* Propagate cleaned entries to other ranks. */
- if(H5AC_propagate_flushed_and_still_clean_entries_list(f, H5AC_dxpl_id, cache_ptr) < 0)
+ if(H5AC__propagate_flushed_and_still_clean_entries_list(f, H5AC_dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't propagate clean entries list.")
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_rsp__p0_only__flush() */
+} /* H5AC__rsp__p0_only__flush() */
#endif /* H5_HAVE_PARALLEL */
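The process-0-only flush above has a simpler protocol than the distributed one: a single barrier, a flush performed only on rank 0 with write_permitted raised just for the duration of the flush, an optional write_done() test-bed hook, and then propagation of the cleaned addresses to the other ranks. The sketch below mirrors only that control flow; flush_all_dirty() and propagate_clean_list() are stand-ins, not HDF5 functions.

#include <mpi.h>
#include <stdio.h>

/* Stand-in for H5C_flush_cache(); gated on the write_permitted flag just
 * as the metadata cache is.
 */
static int flush_all_dirty(int write_permitted)
{
    if (!write_permitted)
        return -1;
    printf("rank 0: flushing all dirty metadata\n");
    return 0;
}

/* Stand-in for the clean-list broadcast (all ranks participate). */
static void propagate_clean_list(int rank)
{
    (void)rank;                     /* Bcast of cleaned addresses goes here */
}

/* Optional test-bed hook, mirroring aux_ptr->write_done. */
static void (*write_done)(void) = NULL;

int main(int argc, char **argv)
{
    int rank, write_permitted = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Synchronize before the flush so no rank reads metadata that rank 0
     * is about to overwrite ("messages from the future").
     */
    MPI_Barrier(MPI_COMM_WORLD);

    if (rank == 0) {
        write_permitted = 1;        /* writes allowed only inside the flush */
        if (flush_all_dirty(write_permitted) < 0)
            fprintf(stderr, "flush failed\n");
        write_permitted = 0;

        if (write_done)             /* test-bed hook, normally NULL */
            (*write_done)();
    }

    /* All ranks take part in propagating the now-clean addresses. */
    propagate_clean_list(rank);

    MPI_Finalize();
    return 0;
}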
/*-------------------------------------------------------------------------
- * Function: H5AC_rsp__p0_only__flush_to_min_clean
+ * Function: H5AC__rsp__p0_only__flush_to_min_clean
*
* Purpose: Routine for handling the details of running a sync point
* triggered by the accumulation of dirty metadata (as
@@ -4860,29 +4131,25 @@ done:
*-------------------------------------------------------------------------
*/
#ifdef H5_HAVE_PARALLEL
-herr_t
-H5AC_rsp__p0_only__flush_to_min_clean(H5F_t *f,
- hid_t dxpl_id,
- H5AC_t * cache_ptr)
+static herr_t
+H5AC__rsp__p0_only__flush_to_min_clean(H5F_t *f, hid_t dxpl_id)
{
- hbool_t evictions_enabled;
+ H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
+ hbool_t evictions_enabled;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( f != NULL );
- HDassert( f->shared->cache == cache_ptr );
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(f != NULL);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY );
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
/* Query if evictions are allowed */
if(H5C_get_evictions_enabled((const H5C_t *)cache_ptr, &evictions_enabled) < 0)
@@ -4896,14 +4163,14 @@ H5AC_rsp__p0_only__flush_to_min_clean(H5F_t *f,
* Otherwise, do nothing.
*/
if(evictions_enabled) {
- int mpi_code;
+ int mpi_result;
/* to prevent "messages from the future" we must synchronize all
* processes before we start the flush. This synchronization may
* already be done -- hence the do_barrier parameter.
*/
- if(MPI_SUCCESS != (mpi_code = MPI_Barrier(aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_code)
+ if(MPI_SUCCESS != (mpi_result = MPI_Barrier(aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_result)
if(0 == aux_ptr->mpi_rank) {
herr_t result;
@@ -4913,12 +4180,17 @@ H5AC_rsp__p0_only__flush_to_min_clean(H5F_t *f,
* Note that it is quite possible that no entries will be
* flushed.
*/
+
+ /* Enable writes during this operation */
aux_ptr->write_permitted = TRUE;
- result = H5C_flush_to_min_clean(f, dxpl_id, H5AC_dxpl_id);
+ /* Flush the cache */
+ result = H5C_flush_to_min_clean(f, dxpl_id);
+ /* Disable writes again */
aux_ptr->write_permitted = FALSE;
+ /* Check for error on the write operation */
if(result < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_flush_to_min_clean() failed.")
@@ -4926,22 +4198,22 @@ H5AC_rsp__p0_only__flush_to_min_clean(H5F_t *f,
* to enforce POSIX semantics on the process used to simulate
* reads and writes in t_cache.c.
*/
- if(aux_ptr->write_done != NULL)
+ if(aux_ptr->write_done)
(aux_ptr->write_done)();
} /* end if */
- if(H5AC_propagate_flushed_and_still_clean_entries_list(f, dxpl_id, cache_ptr) < 0)
+ if(H5AC__propagate_flushed_and_still_clean_entries_list(f, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't propagate clean entries list.")
} /* end if */
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_rsp__p0_only__flush_to_min_clean() */
+} /* H5AC__rsp__p0_only__flush_to_min_clean() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
- * Function: H5AC_run_sync_point
+ * Function: H5AC__run_sync_point
*
* Purpose: Top level routine for managing a sync point between all
* meta data caches in the parallel case. Since all caches
@@ -4972,56 +4244,49 @@ done:
*-------------------------------------------------------------------------
*/
#ifdef H5_HAVE_PARALLEL
-herr_t
-H5AC_run_sync_point(H5F_t *f,
- hid_t dxpl_id,
- int sync_point_op)
+static herr_t
+H5AC__run_sync_point(H5F_t *f, hid_t dxpl_id, int sync_point_op)
{
- H5AC_t * cache_ptr;
+ H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( f != NULL );
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
+ HDassert(f != NULL);
cache_ptr = f->shared->cache;
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
-
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
-
- HDassert( ( sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN ) ||
- ( sync_point_op == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED ) );
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert((sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) ||
+ (sync_point_op == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED));
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
- HDfprintf(stdout,
- "%d:H5AC_propagate...:%u: (u/uu/i/iu/r/ru) = %zu/%u/%zu/%u/%zu/%u\n",
- aux_ptr->mpi_rank,
- aux_ptr->dirty_bytes_propagations,
- aux_ptr->unprotect_dirty_bytes,
- aux_ptr->unprotect_dirty_bytes_updates,
- aux_ptr->insert_dirty_bytes,
- aux_ptr->insert_dirty_bytes_updates,
- aux_ptr->rename_dirty_bytes,
- aux_ptr->rename_dirty_bytes_updates);
+HDfprintf(stdout, "%d:H5AC_propagate...:%u: (u/uu/i/iu/r/ru) = %zu/%u/%zu/%u/%zu/%u\n",
+ aux_ptr->mpi_rank,
+ aux_ptr->dirty_bytes_propagations,
+ aux_ptr->unprotect_dirty_bytes,
+ aux_ptr->unprotect_dirty_bytes_updates,
+ aux_ptr->insert_dirty_bytes,
+ aux_ptr->insert_dirty_bytes_updates,
+ aux_ptr->rename_dirty_bytes,
+ aux_ptr->rename_dirty_bytes_updates);
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
switch(aux_ptr->metadata_write_strategy) {
case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
switch(sync_point_op) {
case H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN:
- if(H5AC_rsp__p0_only__flush_to_min_clean(f, dxpl_id, cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5AC_rsp__p0_only__flush_to_min_clean() failed.")
+ if(H5AC__rsp__p0_only__flush_to_min_clean(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5AC__rsp__p0_only__flush_to_min_clean() failed.")
break;
case H5AC_SYNC_POINT_OP__FLUSH_CACHE:
- if(H5AC_rsp__p0_only__flush(f, dxpl_id, cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5AC_rsp__p0_only__flush() failed.")
+ if(H5AC__rsp__p0_only__flush(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5AC__rsp__p0_only__flush() failed.")
break;
default:
@@ -5033,13 +4298,13 @@ H5AC_run_sync_point(H5F_t *f,
case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
switch(sync_point_op) {
case H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN:
- if(H5AC_rsp__dist_md_write__flush_to_min_clean(f, dxpl_id, cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5AC_rsp__dist_md_write__flush() failed.")
+ if(H5AC__rsp__dist_md_write__flush_to_min_clean(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5AC__rsp__dist_md_write__flush_to_min_clean() failed.")
break;
case H5AC_SYNC_POINT_OP__FLUSH_CACHE:
- if(H5AC_rsp__dist_md_write__flush(f, dxpl_id, cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5AC_rsp__dist_md_write__flush() failed.")
+ if(H5AC__rsp__dist_md_write__flush(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5AC__rsp__dist_md_write__flush() failed.")
break;
default:
@@ -5068,12 +4333,12 @@ H5AC_run_sync_point(H5F_t *f,
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_run_sync_point() */
+} /* H5AC__run_sync_point() */
#endif /* H5_HAVE_PARALLEL */
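H5AC__run_sync_point() above is a two-level dispatch: the outer switch selects the metadata write strategy stored in the aux structure, and the inner switch selects the kind of sync point being run. The stand-alone sketch below reproduces only that dispatch shape; the enum and handler names are invented.

#include <stdio.h>

enum write_strategy { STRATEGY_PROCESS_0_ONLY, STRATEGY_DISTRIBUTED };
enum sync_point_op  { OP_FLUSH_TO_MIN_CLEAN,   OP_FLUSH_CACHE };

/* Stand-ins for the four H5AC__rsp__* handlers. */
static int p0_flush_to_min_clean(void)   { puts("p0: flush to min clean");   return 0; }
static int p0_flush(void)                { puts("p0: full flush");           return 0; }
static int dist_flush_to_min_clean(void) { puts("dist: flush to min clean"); return 0; }
static int dist_flush(void)              { puts("dist: full flush");         return 0; }

static int run_sync_point(enum write_strategy strategy, enum sync_point_op op)
{
    switch (strategy) {
        case STRATEGY_PROCESS_0_ONLY:
            switch (op) {
                case OP_FLUSH_TO_MIN_CLEAN: return p0_flush_to_min_clean();
                case OP_FLUSH_CACHE:        return p0_flush();
                default:                    return -1;   /* unknown op */
            }
        case STRATEGY_DISTRIBUTED:
            switch (op) {
                case OP_FLUSH_TO_MIN_CLEAN: return dist_flush_to_min_clean();
                case OP_FLUSH_CACHE:        return dist_flush();
                default:                    return -1;   /* unknown op */
            }
        default:
            return -1;                                   /* unknown strategy */
    }
}

int main(void)
{
    return run_sync_point(STRATEGY_DISTRIBUTED, OP_FLUSH_CACHE) < 0 ? 1 : 0;
}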
/*-------------------------------------------------------------------------
- * Function: H5AC_tidy_cache_0_lists()
+ * Function: H5AC__tidy_cache_0_lists()
*
* Purpose: In the distributed metadata write strategy, not all dirty
* entries are written by process 0 -- thus we must tidy
@@ -5109,29 +4374,24 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_tidy_cache_0_lists(H5AC_t * cache_ptr,
- int num_candidates,
- haddr_t * candidates_list_ptr)
-
+H5AC__tidy_cache_0_lists(H5AC_t *cache_ptr, int num_candidates,
+ haddr_t *candidates_list_ptr)
{
- int i;
H5AC_aux_t * aux_ptr;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
+ int i;
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ FUNC_ENTER_STATIC_NOERR
+ /* Sanity checks */
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
-
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- HDassert( aux_ptr->metadata_write_strategy ==
- H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
- HDassert( aux_ptr->mpi_rank == 0 );
- HDassert( num_candidates > 0 );
- HDassert( candidates_list_ptr != NULL );
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
+ HDassert(aux_ptr->mpi_rank == 0);
+ HDassert(num_candidates > 0);
+ HDassert(candidates_list_ptr != NULL);
/* clean up dirtied and flushed and still clean lists by removing
* all entries on the candidate list. Cleared entries should
@@ -5147,50 +4407,22 @@ H5AC_tidy_cache_0_lists(H5AC_t * cache_ptr,
addr = candidates_list_ptr[i];
- /* addr must be either on the dirtied list, or on the flushed
+ /* addr may be either on the dirtied list, or on the flushed
* and still clean list. Remove it.
*/
- d_slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_search(aux_ptr->d_slist_ptr, (void *)&addr);
- if(d_slist_entry_ptr != NULL) {
- HDassert(d_slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
- HDassert(d_slist_entry_ptr->addr == addr);
-
- if(H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr)) != d_slist_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, "Can't delete entry from dirty entry slist.")
-
- d_slist_entry_ptr->magic = 0;
+ if(NULL != (d_slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(aux_ptr->d_slist_ptr, (void *)&addr)))
d_slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, d_slist_entry_ptr);
-
- aux_ptr->d_slist_len -= 1;
-
- HDassert(aux_ptr->d_slist_len >= 0);
- } /* end if */
-
- c_slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_search(aux_ptr->c_slist_ptr, (void *)&addr);
- if(c_slist_entry_ptr != NULL) {
- HDassert(c_slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
- HDassert(c_slist_entry_ptr->addr == addr);
-
- if(H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr)) != c_slist_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, "Can't delete entry from clean entry slist.")
-
- c_slist_entry_ptr->magic = 0;
+ if(NULL != (c_slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(aux_ptr->c_slist_ptr, (void *)&addr)))
c_slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, c_slist_entry_ptr);
-
- aux_ptr->c_slist_len -= 1;
-
- HDassert( aux_ptr->c_slist_len >= 0 );
- } /* end if */
} /* end for */
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_tidy_cache_0_lists() */
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5AC__tidy_cache_0_lists() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
- * Function: H5AC_flush_entries
+ * Function: H5AC__flush_entries
*
* Purpose: Flush the metadata cache associated with the specified file,
* only writing from rank 0, but propagating the cleaned entries
@@ -5206,25 +4438,25 @@ done:
*-------------------------------------------------------------------------
*/
#ifdef H5_HAVE_PARALLEL
-herr_t
-H5AC_flush_entries(H5F_t *f)
+static herr_t
+H5AC__flush_entries(H5F_t *f, hid_t dxpl_id)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
HDassert(f);
HDassert(f->shared->cache);
/* Check if we have >1 ranks */
- if(f->shared->cache->aux_ptr) {
- if(H5AC_run_sync_point(f, H5AC_dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_CACHE) < 0)
+ if(f->shared->cache->aux_ptr)
+ if(H5AC__run_sync_point(f, dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_CACHE) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point.")
- } /* end if */
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_flush_entries() */
+} /* H5AC__flush_entries() */
#endif /* H5_HAVE_PARALLEL */
@@ -5244,15 +4476,13 @@ done:
*------------------------------------------------------------------------------
*/
herr_t
-H5AC_ignore_tags(H5F_t * f)
+H5AC_ignore_tags(const H5F_t *f)
{
- /* Variable Declarations */
herr_t ret_value = SUCCEED;
- /* Function Enter Macro */
FUNC_ENTER_NOAPI(FAIL)
- /* Assertions */
+ /* Sanity checks */
HDassert(f);
HDassert(f->shared);
HDassert(f->shared->cache);
@@ -5279,28 +4509,25 @@ done:
*------------------------------------------------------------------------------
*/
herr_t
-H5AC_tag(hid_t dxpl_id, haddr_t metadata_tag, haddr_t * prev_tag)
+H5AC_tag(hid_t dxpl_id, haddr_t metadata_tag, haddr_t *prev_tag)
{
- /* Variable Declarations */
- H5P_genplist_t *dxpl; /* dataset transfer property list */
+ H5P_genplist_t *dxpl; /* dataset transfer property list */
herr_t ret_value = SUCCEED;
- /* Function Enter Macro */
FUNC_ENTER_NOAPI(FAIL)
/* Check Arguments */
if(NULL == (dxpl = (H5P_genplist_t *)H5I_object_verify(dxpl_id, H5I_GENPROP_LST)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a property list")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, FAIL, "not a property list")
- /* Get the current tag value and return that (if prev_tag is NOT null)*/
- if(prev_tag) {
+ /* Get the current tag value and return that (if prev_tag is NOT null) */
+ if(prev_tag)
if((H5P_get(dxpl, "H5AC_metadata_tag", prev_tag)) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to query dxpl")
- } /* end if */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "unable to query dxpl")
/* Set the provided tag value in the dxpl_id. */
if(H5P_set(dxpl, "H5AC_metadata_tag", &metadata_tag) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set property in dxpl")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "can't set property in dxpl")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -5322,21 +4549,17 @@ done:
*------------------------------------------------------------------------------
*/
herr_t
-H5AC_retag_copied_metadata(H5F_t * f, haddr_t metadata_tag)
+H5AC_retag_copied_metadata(const H5F_t *f, haddr_t metadata_tag)
{
- herr_t ret_value = SUCCEED;
-
- /* Function Enter Macro */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
- /* Assertions */
+ /* Sanity checks */
HDassert(f);
HDassert(f->shared);
/* Call cache-level function to retag entries */
H5C_retag_copied_metadata(f->shared->cache, metadata_tag);
-done:
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5AC_retag_copied_metadata */
diff --git a/src/H5ACpkg.h b/src/H5ACpkg.h
index 5b8ac86..28965d2 100644
--- a/src/H5ACpkg.h
+++ b/src/H5ACpkg.h
@@ -297,11 +297,6 @@
* To reiterate, this field is only used on process 0 -- it
* should be NULL on all other processes.
*
- * d_slist_len: Integer field containing the number of entries in the
- * dirty entry list. This field should always contain the
- * value 0 on all processes other than process 0. It exists
- * primarily for sanity checking.
- *
* c_slist_ptr: Pointer to an instance of H5SL_t used to maintain a list
* of entries that were dirty, have been flushed
* to disk since the last clean entries broadcast, and are
@@ -312,11 +307,6 @@
* the next clean entries broadcast. The list emptied after
* each broadcast.
*
- * c_slist_len: Integer field containing the number of entries in the clean
- * entries list (*c_slist_ptr). This field should always
- * contain the value 0 on all processes other than process 0.
- * It exists primarily for sanity checking.
- *
* The following two fields are used only when metadata_write_strategy
* is H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED.
*
@@ -325,9 +315,6 @@
* point. This list is then broadcast to the other processes,
* which then either flush or mark clean all entries on it.
*
- * candidate_slist_len: Integer field containing the number of entries on the
- * candidate list. It exists primarily for sanity checking.
- *
* write_done: In the parallel test bed, it is necessary to ensure that
* all writes to the server process from cache 0 complete
* before it enters the barrier call with the other caches.
@@ -394,16 +381,10 @@ typedef struct H5AC_aux_t
H5SL_t * d_slist_ptr;
- int32_t d_slist_len;
-
H5SL_t * c_slist_ptr;
- int32_t c_slist_len;
-
H5SL_t * candidate_slist_ptr;
- int32_t candidate_slist_len;
-
void (* write_done)(void);
void (* sync_point_done)(int num_writes,
@@ -411,6 +392,12 @@ typedef struct H5AC_aux_t
} H5AC_aux_t; /* struct H5AC_aux_t */
+/* Package scoped functions */
+H5_DLL herr_t H5AC_set_sync_point_done_callback(H5C_t *cache_ptr,
+ void (*sync_point_done)(int num_writes, haddr_t *written_entries_tbl));
+H5_DLL herr_t H5AC_set_write_done_callback(H5C_t * cache_ptr,
+ void (* write_done)(void));
+
#endif /* H5_HAVE_PARALLEL */
#endif /* _H5ACpkg_H */
diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h
index e259a24..584ce9d 100644
--- a/src/H5ACprivate.h
+++ b/src/H5ACprivate.h
@@ -79,6 +79,7 @@ typedef enum {
H5AC_FARRAY_DBLOCK_ID, /*fixed array data block */
H5AC_FARRAY_DBLK_PAGE_ID, /*fixed array data block page */
H5AC_SUPERBLOCK_ID, /* file superblock */
+ H5AC_DRVRINFO_ID, /* driver info block (supplements superblock)*/
H5AC_TEST_ID, /*test entry -- not used for actual files */
H5AC_NTYPES /* Number of types, must be last */
} H5AC_type_t;
@@ -91,6 +92,17 @@ typedef enum {
* times for debugging purposes.
*
* Hence the following, somewhat odd set of #defines.
+ *
+ * NOTE: test/cache plays games with the f->shared->cache, and thus
+ * setting H5AC_DUMP_STATS_ON_CLOSE will generate constant,
+ * irrelevant data when run with that test program. See
+ * comments on setup_cache() / takedown_cache() in test/cache_common.c
+ * for details.
+ *
+ * If you need to dump stats at file close in test/cache.c,
+ * use the dump_stats parameter to takedown_cache(), or call
+ * H5C_stats() directly.
+ * JRM -- 4/12/15
*/
#if H5C_COLLECT_CACHE_STATS
@@ -113,81 +125,48 @@ typedef enum {
/*
* Class methods pertaining to caching. Each type of cached object will
* have a constant variable with permanent life-span that describes how
- * to cache the object. That variable will be of type H5AC_class_t and
- * have the following required fields...
- *
- * LOAD: Loads an object from disk to memory. The function
- * should allocate some data structure and return it.
- *
- * FLUSH: Writes some data structure back to disk. It would be
- * wise for the data structure to include dirty flags to
- * indicate whether it really needs to be written. This
- * function is also responsible for freeing memory allocated
- * by the LOAD method if the DEST argument is non-zero (by
- * calling the DEST method).
- *
- * DEST: Just frees memory allocated by the LOAD method.
- *
- * CLEAR: Just marks object as non-dirty.
- *
- * NOTIFY: Notify client that an action on an entry has taken/will take
- * place
- *
- * SIZE: Report the size (on disk) of the specified cache object.
- * Note that the space allocated on disk may not be contiguous.
+ * to cache the object.
*/
-#define H5AC_CALLBACK__NO_FLAGS_SET H5C_CALLBACK__NO_FLAGS_SET
-#define H5AC_CALLBACK__SIZE_CHANGED_FLAG H5C_CALLBACK__SIZE_CHANGED_FLAG
-#define H5AC_CALLBACK__MOVED_FLAG H5C_CALLBACK__MOVED_FLAG
+#define H5AC__SERIALIZE_RESIZED_FLAG H5C__SERIALIZE_RESIZED_FLAG
+#define H5AC__SERIALIZE_MOVED_FLAG H5C__SERIALIZE_MOVED_FLAG
+#define H5AC__SERIALIZE_COMPRESSED_FLAG H5C__SERIALIZE_COMPRESSED_FLAG
/* Aliases for 'notify action' type & values */
typedef H5C_notify_action_t H5AC_notify_action_t;
#define H5AC_NOTIFY_ACTION_AFTER_INSERT H5C_NOTIFY_ACTION_AFTER_INSERT
+#define H5AC_NOTIFY_ACTION_AFTER_LOAD H5C_NOTIFY_ACTION_AFTER_LOAD
+#define H5AC_NOTIFY_ACTION_AFTER_FLUSH H5C_NOTIFY_ACTION_AFTER_FLUSH
#define H5AC_NOTIFY_ACTION_BEFORE_EVICT H5C_NOTIFY_ACTION_BEFORE_EVICT
-typedef H5C_load_func_t H5AC_load_func_t;
-typedef H5C_flush_func_t H5AC_flush_func_t;
-typedef H5C_dest_func_t H5AC_dest_func_t;
-typedef H5C_clear_func_t H5AC_clear_func_t;
-typedef H5C_notify_func_t H5AC_notify_func_t;
-typedef H5C_size_func_t H5AC_size_func_t;
+#define H5AC__CLASS_NO_FLAGS_SET H5C__CLASS_NO_FLAGS_SET
+#define H5AC__CLASS_SPECULATIVE_LOAD_FLAG H5C__CLASS_SPECULATIVE_LOAD_FLAG
+#define H5AC__CLASS_COMPRESSED_FLAG H5C__CLASS_COMPRESSED_FLAG
-typedef H5C_class_t H5AC_class_t;
+/* The following flags should only appear in test code */
+#define H5AC__CLASS_NO_IO_FLAG H5C__CLASS_NO_IO_FLAG
+#define H5AC__CLASS_SKIP_READS H5C__CLASS_SKIP_READS
+#define H5AC__CLASS_SKIP_WRITES H5C__CLASS_SKIP_WRITES
+typedef H5C_get_load_size_func_t H5AC_get_load_size_func_t;
+typedef H5C_deserialize_func_t H5AC_deserialize_func_t;
+typedef H5C_image_len_func_t H5AC_image_len_func_t;
-/* The H5AC_NSLOTS #define is now obsolete, as the metadata cache no longer
- * uses slots. However I am leaving it in for now to avoid modifying the
- * interface between the metadata cache and the rest of HDF. It should
- * be removed when we get to dealing with the size_hint parameter in
- * H5AC_create().
- * JRM - 5/20/04
- *
- * Old comment on H5AC_NSLOTS follows:
- *
- * A cache has a certain number of entries. Objects are mapped into a
- * cache entry by hashing the object's file address. Each file has its
- * own cache, an array of slots.
- */
-#define H5AC_NSLOTS 10330 /* The library "likes" this number... */
-
+#define H5AC__SERIALIZE_NO_FLAGS_SET H5C__SERIALIZE_NO_FLAGS_SET
+#define H5AC__SERIALIZE_RESIZED_FLAG H5C__SERIALIZE_RESIZED_FLAG
+#define H5AC__SERIALIZE_MOVED_FLAG H5C__SERIALIZE_MOVED_FLAG
-typedef H5C_cache_entry_t H5AC_info_t;
+typedef H5C_pre_serialize_func_t H5AC_pre_serialize_func_t;
+typedef H5C_serialize_func_t H5AC_serialize_func_t;
+typedef H5C_notify_func_t H5AC_notify_func_t;
+typedef H5C_free_icr_func_t H5AC_free_icr_func_t;
+typedef H5C_clear_func_t H5AC_clear_func_t;
+typedef H5C_get_fsf_size_t H5AC_get_fsf_size_t;
+typedef H5C_class_t H5AC_class_t;
-/*===----------------------------------------------------------------------===
- * Protect Types
- *===----------------------------------------------------------------------===
- *
- * These are for the wrapper functions to H5AC_protect. They specify what
- * type of operation you're planning on doing to the metadata. The
- * Flexible Parallel HDF5 locking can then act accordingly.
- */
-
-typedef enum H5AC_protect_t {
- H5AC_WRITE, /* Protect object for writing */
- H5AC_READ /* Protect object for reading */
-} H5AC_protect_t;
+/* Cache entry info */
+typedef H5C_cache_entry_t H5AC_info_t;
/* Typedef for metadata cache (defined in H5Cpkg.h) */
@@ -317,6 +296,7 @@ H5_DLLVAR hid_t H5AC_ind_dxpl_id;
#define H5AC__FLUSH_CLEAR_ONLY_FLAG H5C__FLUSH_CLEAR_ONLY_FLAG
#define H5AC__FLUSH_MARKED_ENTRIES_FLAG H5C__FLUSH_MARKED_ENTRIES_FLAG
#define H5AC__FLUSH_IGNORE_PROTECTED_FLAG H5C__FLUSH_IGNORE_PROTECTED_FLAG
+#define H5AC__READ_ONLY_FLAG H5C__READ_ONLY_FLAG
#define H5AC__FREE_FILE_SPACE_FLAG H5C__FREE_FILE_SPACE_FLAG
#define H5AC__TAKE_OWNERSHIP_FLAG H5C__TAKE_OWNERSHIP_FLAG
#define H5AC__FLUSH_LAST_FLAG H5C__FLUSH_LAST_FLAG
@@ -346,7 +326,7 @@ H5_DLL herr_t H5AC_insert_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *typ
H5_DLL herr_t H5AC_pin_protected_entry(void *thing);
H5_DLL herr_t H5AC_create_flush_dependency(void *parent_thing, void *child_thing);
H5_DLL void * H5AC_protect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type,
- haddr_t addr, void *udata, H5AC_protect_t rw);
+ haddr_t addr, void *udata, unsigned flags);
H5_DLL herr_t H5AC_resize_entry(void *thing, size_t new_size);
H5_DLL herr_t H5AC_unpin_entry(void *thing);
H5_DLL herr_t H5AC_destroy_flush_dependency(void *parent_thing, void *child_thing);
@@ -359,12 +339,6 @@ H5_DLL herr_t H5AC_move_entry(H5F_t *f, const H5AC_class_t *type,
H5_DLL herr_t H5AC_dest(H5F_t *f, hid_t dxpl_id);
H5_DLL herr_t H5AC_expunge_entry(H5F_t *f, hid_t dxpl_id,
const H5AC_class_t *type, haddr_t addr, unsigned flags);
-H5_DLL herr_t H5AC_set_sync_point_done_callback(H5C_t *cache_ptr,
- void (*sync_point_done)(int num_writes, haddr_t *written_entries_tbl));
-H5_DLL herr_t H5AC_set_write_done_callback(H5C_t * cache_ptr,
- void (* write_done)(void));
-H5_DLL herr_t H5AC_stats(const H5F_t *f);
-H5_DLL herr_t H5AC_dump_cache(const H5F_t *f);
H5_DLL herr_t H5AC_get_cache_auto_resize_config(const H5AC_t * cache_ptr,
H5AC_cache_config_t *config_ptr);
H5_DLL herr_t H5AC_get_cache_size(H5AC_t *cache_ptr, size_t *max_size_ptr,
@@ -374,27 +348,24 @@ H5_DLL herr_t H5AC_reset_cache_hit_rate_stats(H5AC_t *cache_ptr);
H5_DLL herr_t H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr,
H5AC_cache_config_t *config_ptr);
H5_DLL herr_t H5AC_validate_config(H5AC_cache_config_t *config_ptr);
-
H5_DLL herr_t H5AC_close_trace_file(H5AC_t *cache_ptr);
H5_DLL herr_t H5AC_open_trace_file(H5AC_t *cache_ptr, const char *trace_file_name);
-H5_DLL herr_t H5AC_tag(hid_t dxpl_id, haddr_t metadata_tag, haddr_t * prev_tag);
-H5_DLL herr_t H5AC_retag_copied_metadata(H5F_t * f, haddr_t metadata_tag);
-H5_DLL herr_t H5AC_ignore_tags(H5F_t * f);
+H5_DLL herr_t H5AC_tag(hid_t dxpl_id, haddr_t metadata_tag, haddr_t *prev_tag);
+H5_DLL herr_t H5AC_retag_copied_metadata(const H5F_t *f, haddr_t metadata_tag);
+H5_DLL herr_t H5AC_ignore_tags(const H5F_t *f);
#ifdef H5_HAVE_PARALLEL
H5_DLL herr_t H5AC_add_candidate(H5AC_t * cache_ptr, haddr_t addr);
#endif /* H5_HAVE_PARALLEL */
#ifndef NDEBUG /* debugging functions */
-
+H5_DLL herr_t H5AC_stats(const H5F_t *f);
+H5_DLL herr_t H5AC_dump_cache(const H5F_t *f);
H5_DLL herr_t H5AC_get_entry_ptr_from_addr(const H5F_t *f, haddr_t addr,
- void ** entry_ptr_ptr);
-
+ void ** entry_ptr_ptr);
H5_DLL herr_t H5AC_verify_entry_type(const H5F_t * f, haddr_t addr,
- const H5AC_class_t * expected_type,
- hbool_t * in_cache_ptr,
- hbool_t * type_ok_ptr);
-
+ const H5AC_class_t * expected_type, hbool_t * in_cache_ptr,
+ hbool_t * type_ok_ptr);
#endif /* NDEBUG */ /* end debugging functions */
#endif /* !_H5ACprivate_H */
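
The H5B.c changes that follow show the caller-side effect of this interface change: the old H5AC_protect_t read/write enum is gone, and access intent is now passed through the flags parameter of H5AC_protect(). A minimal caller-side sketch of the two access modes, assuming the surrounding HDF5 internals; the file pointer, dxpl, entry class, address, and user data names below are hypothetical placeholders, not identifiers from this patch:

    void *entry;

    /* Read-only access -- replaces the old H5AC_READ argument */
    if(NULL == (entry = H5AC_protect(f, dxpl_id, entry_class, addr, udata, H5AC__READ_ONLY_FLAG)))
        /* ...error handling... */ ;

    /* Read/write access -- replaces the old H5AC_WRITE argument */
    if(NULL == (entry = H5AC_protect(f, dxpl_id, entry_class, addr, udata, H5AC__NO_FLAGS_SET)))
        /* ...error handling... */ ;
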
diff --git a/src/H5B.c b/src/H5B.c
index 765a57e..b1f3388 100644
--- a/src/H5B.c
+++ b/src/H5B.c
@@ -336,7 +336,7 @@ H5B_find(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void *u
cache_udata.f = f;
cache_udata.type = type;
cache_udata.rc_shared = rc_shared;
- if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ)))
+ if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree node")
rt = bt->nchildren;
@@ -485,7 +485,7 @@ H5B__split(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud, unsigned idx,
cache_udata.f = f;
cache_udata.type = shared->type;
cache_udata.rc_shared = bt_ud->bt->rc_shared;
- if(NULL == (split_bt_ud->bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, split_bt_ud->addr, &cache_udata, H5AC_WRITE)))
+ if(NULL == (split_bt_ud->bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, split_bt_ud->addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree")
split_bt_ud->bt->level = bt_ud->bt->level;
@@ -518,7 +518,7 @@ H5B__split(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud, unsigned idx,
if(H5F_addr_defined(bt_ud->bt->right)) {
H5B_t *tmp_bt;
- if(NULL == (tmp_bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt_ud->bt->right, &cache_udata, H5AC_WRITE)))
+ if(NULL == (tmp_bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt_ud->bt->right, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load right sibling")
tmp_bt->left = split_bt_ud->addr;
@@ -597,7 +597,7 @@ H5B_insert(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void
cache_udata.type = type;
cache_udata.rc_shared = rc_shared;
bt_ud.addr = addr;
- if(NULL == (bt_ud.bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_WRITE)))
+ if(NULL == (bt_ud.bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to locate root of B-tree")
/* Insert the object */
@@ -901,7 +901,7 @@ H5B__insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
* Follow the minimum branch out of this node to a subtree.
*/
child_bt_ud.addr = bt->child[idx];
- if(NULL == (child_bt_ud.bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, child_bt_ud.addr, &cache_udata, H5AC_WRITE)))
+ if(NULL == (child_bt_ud.bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, child_bt_ud.addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to load node")
if((int)(my_ins = H5B__insert_helper(f, dxpl_id, &child_bt_ud, type,
@@ -947,7 +947,7 @@ H5B__insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
*/
idx = bt->nchildren - 1;
child_bt_ud.addr = bt->child[idx];
- if(NULL == (child_bt_ud.bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, child_bt_ud.addr, &cache_udata, H5AC_WRITE)))
+ if(NULL == (child_bt_ud.bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, child_bt_ud.addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to load node")
if((int)(my_ins = H5B__insert_helper(f, dxpl_id, &child_bt_ud, type,
@@ -1002,7 +1002,7 @@ H5B__insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
*/
HDassert(idx < bt->nchildren);
child_bt_ud.addr = bt->child[idx];
- if(NULL == (child_bt_ud.bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, child_bt_ud.addr, &cache_udata, H5AC_WRITE)))
+ if(NULL == (child_bt_ud.bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, child_bt_ud.addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to load node")
if((int)(my_ins = H5B__insert_helper(f, dxpl_id, &child_bt_ud, type,
@@ -1164,7 +1164,7 @@ H5B__iterate_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t ad
cache_udata.f = f;
cache_udata.type = type;
cache_udata.rc_shared = rc_shared;
- if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ)))
+ if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5_ITER_ERROR, "unable to load B-tree node")
/* Iterate over node's children */
@@ -1287,7 +1287,7 @@ H5B__remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *typ
cache_udata.f = f;
cache_udata.type = type;
cache_udata.rc_shared = rc_shared;
- if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_WRITE)))
+ if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to load B-tree node")
rt = bt->nchildren;
@@ -1387,7 +1387,7 @@ H5B__remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *typ
* "critical" for any child in its node to maintain this
* consistency (and avoid breaking key/child consistency) */
if(H5F_addr_defined(bt->left)) {
- if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt->left, &cache_udata, H5AC_WRITE)))
+ if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt->left, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to load node from tree")
/* Copy right-most key from deleted node to right-most key
@@ -1404,7 +1404,7 @@ H5B__remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *typ
sibling = NULL; /* Make certain future references will be caught */
} /* end if */
if(H5F_addr_defined(bt->right)) {
- if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt->right, &cache_udata, H5AC_WRITE)))
+ if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt->right, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to unlink node from tree")
/* Copy left-most key from deleted node to left-most key in
@@ -1524,7 +1524,7 @@ H5B__remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *typ
HDassert(level > 0);
/* Update the rightmost key in the left sibling */
- if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt->left, &cache_udata, H5AC_WRITE)))
+ if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt->left, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to protect node")
HDmemcpy(H5B_NKEY(sibling, shared, sibling->nchildren),
@@ -1539,7 +1539,7 @@ H5B__remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *typ
HDassert(level > 0);
/* Update the leftmost key in the right sibling */
- if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt->right, &cache_udata, H5AC_WRITE)))
+ if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt->right, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to protect node")
HDmemcpy(H5B_NKEY(sibling, shared, 0),
@@ -1646,7 +1646,7 @@ H5B_delete(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void
cache_udata.f = f;
cache_udata.type = type;
cache_udata.rc_shared = rc_shared;
- if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_WRITE)))
+ if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree node")
/* Iterate over all children in tree, deleting them */
@@ -1908,7 +1908,7 @@ H5B__get_info_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t a
cache_udata.f = f;
cache_udata.type = type;
cache_udata.rc_shared = rc_shared;
- if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ)))
+ if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree node")
/* Cache information from this node */
@@ -1932,7 +1932,7 @@ H5B__get_info_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t a
while(H5F_addr_defined(next_addr)) {
/* Protect the next node to the right */
addr = next_addr;
- if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ)))
+ if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "B-tree node")
/* Cache information from this node */
@@ -2059,7 +2059,7 @@ H5B_valid(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr)
cache_udata.f = f;
cache_udata.type = type;
cache_udata.rc_shared = rc_shared;
- if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ)))
+ if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree node")
done:
diff --git a/src/H5B2.c b/src/H5B2.c
index 0c0f24f..7b5aa16 100644
--- a/src/H5B2.c
+++ b/src/H5B2.c
@@ -155,8 +155,9 @@ H5B2_create(H5F_t *f, hid_t dxpl_id, const H5B2_create_t *cparam, void *ctx_udat
/* Look up the B-tree header */
cache_udata.f = f;
+ cache_udata.addr = hdr_addr;
cache_udata.ctx_udata = ctx_udata;
- if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, hdr_addr, &cache_udata, H5AC_WRITE)))
+ if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, hdr_addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, NULL, "unable to load B-tree header")
/* Point v2 B-tree wrapper at header and bump its ref count */
@@ -215,8 +216,9 @@ H5B2_open(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *ctx_udata)
/* Look up the B-tree header */
cache_udata.f = f;
+ cache_udata.addr = addr;
cache_udata.ctx_udata = ctx_udata;
- if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, addr, &cache_udata, H5AC_READ)))
+ if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, NULL, "unable to load B-tree header")
/* Check for pending heap deletion */
@@ -479,7 +481,7 @@ H5B2_find(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_found_t op,
H5B2_node_ptr_t next_node_ptr; /* Node pointer info for next node */
/* Lock B-tree current node */
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC_READ)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node")
/* Locate node pointer for child */
@@ -542,7 +544,7 @@ H5B2_find(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_found_t op,
H5B2_leaf_t *leaf; /* Pointer to leaf node in B-tree */
/* Lock B-tree leaf node */
- if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC_READ)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Locate record */
@@ -662,7 +664,7 @@ H5B2_index(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, hsize_t idx,
unsigned u; /* Local index variable */
/* Lock B-tree current node */
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC_READ)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node")
/* Search for record with correct index */
@@ -734,7 +736,7 @@ H5B2_index(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, hsize_t idx,
H5B2_leaf_t *leaf; /* Pointer to leaf node in B-tree */
/* Lock B-tree leaf node */
- if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC_READ)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Sanity check index */
@@ -1070,7 +1072,7 @@ H5B2_modify(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_modify_t op,
H5B2_node_ptr_t next_node_ptr; /* Node pointer info for next node */
/* Lock B-tree current node */
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC_WRITE)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node")
/* Locate node pointer for child */
@@ -1142,7 +1144,7 @@ H5B2_modify(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_modify_t op,
hbool_t changed = FALSE;/* Whether the 'modify' callback changed the record */
/* Lock B-tree leaf node */
- if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC_WRITE)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Locate record */
@@ -1276,7 +1278,7 @@ H5B2_close(H5B2_t *bt2, hid_t dxpl_id)
/* Lock the v2 B-tree header into memory */
/* (OK to pass in NULL for callback context, since we know the header must be in the cache) */
- if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(bt2->f, dxpl_id, H5AC_BT2_HDR, bt2_addr, NULL, H5AC_WRITE)))
+ if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(bt2->f, dxpl_id, H5AC_BT2_HDR, bt2_addr, NULL, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect v2 B-tree header")
/* Set the shared v2 B-tree header's file context for this operation */
@@ -1352,8 +1354,9 @@ H5B2_delete(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *ctx_udata,
HDfprintf(stderr, "%s: addr = %a\n", FUNC, addr);
#endif /* QAK */
cache_udata.f = f;
+ cache_udata.addr = addr;
cache_udata.ctx_udata = ctx_udata;
- if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, addr, &cache_udata, H5AC_WRITE)))
+ if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect v2 B-tree header")
/* Remember the callback & context for later */
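
The H5B2cache.c rewrite that follows moves the v2 B-tree cache clients from the old load/flush/dest/clear/size callbacks to the serialization-based client interface. A hedged sketch of the new client-class shape, mirroring the field order of the initializers below; the client ID, name, memory type, and callback names in this sketch are hypothetical placeholders rather than symbols introduced by this patch:

    const H5AC_class_t H5AC_MY_THING[1] = {{
        H5AC_MY_THING_ID,               /* Metadata client ID (hypothetical) */
        "my thing",                     /* Metadata client name (for debugging) */
        H5FD_MEM_DEFAULT,               /* File space memory type for client */
        H5AC__CLASS_NO_FLAGS_SET,       /* Client class behavior flags */
        my_thing__cache_get_load_size,  /* Size of on-disk image, before the read */
        my_thing__cache_deserialize,    /* On-disk image -> in-core representation */
        my_thing__cache_image_len,      /* Size of on-disk image, before the write */
        NULL,                           /* 'pre_serialize' callback (optional) */
        my_thing__cache_serialize,      /* In-core representation -> on-disk image */
        NULL,                           /* 'notify' callback (optional) */
        my_thing__cache_free_icr,       /* Discard the in-core representation */
        NULL,                           /* 'clear' callback (optional) */
        NULL,                           /* 'fsf_size' callback (optional) */
    }};
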
diff --git a/src/H5B2cache.c b/src/H5B2cache.c
index 3b30e02..64e304c 100644
--- a/src/H5B2cache.c
+++ b/src/H5B2cache.c
@@ -37,7 +37,6 @@
#include "H5private.h" /* Generic Functions */
#include "H5B2pkg.h" /* v2 B-trees */
#include "H5Eprivate.h" /* Error handling */
-#include "H5MFprivate.h" /* File memory management */
#include "H5WBprivate.h" /* Wrapped Buffers */
@@ -50,9 +49,6 @@
#define H5B2_INT_VERSION 0 /* Internal node */
#define H5B2_LEAF_VERSION 0 /* Leaf node */
-/* Size of stack buffer for serialized headers */
-#define H5B2_HDR_BUF_SIZE 128
-
/******************/
/* Local Typedefs */
@@ -69,22 +65,32 @@
/********************/
/* Metadata cache callbacks */
-static H5B2_hdr_t *H5B2__cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5B2__cache_hdr_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5B2_hdr_t *hdr, unsigned H5_ATTR_UNUSED * flags_ptr);
-static herr_t H5B2__cache_hdr_dest(H5F_t *f, H5B2_hdr_t *hdr);
-static herr_t H5B2__cache_hdr_clear(H5F_t *f, H5B2_hdr_t *hdr, hbool_t destroy);
-static herr_t H5B2__cache_hdr_size(const H5F_t *f, const H5B2_hdr_t *hdr, size_t *size_ptr);
-static H5B2_internal_t *H5B2__cache_internal_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5B2__cache_internal_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5B2_internal_t *i, unsigned H5_ATTR_UNUSED * flags_ptr);
-static herr_t H5B2__cache_internal_dest(H5F_t *f, H5B2_internal_t *internal);
-static herr_t H5B2__cache_internal_clear(H5F_t *f, H5B2_internal_t *i, hbool_t destroy);
-static herr_t H5B2__cache_internal_size(const H5F_t *f, const H5B2_internal_t *i, size_t *size_ptr);
-static H5B2_leaf_t *H5B2__cache_leaf_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5B2__cache_leaf_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5B2_leaf_t *l, unsigned H5_ATTR_UNUSED * flags_ptr);
-static herr_t H5B2__cache_leaf_dest(H5F_t *f, H5B2_leaf_t *leaf);
-static herr_t H5B2__cache_leaf_clear(H5F_t *f, H5B2_leaf_t *l, hbool_t destroy);
-static herr_t H5B2__cache_leaf_size(const H5F_t *f, const H5B2_leaf_t *l, size_t *size_ptr);
-
+static herr_t H5B2__cache_hdr_get_load_size(const void *udata, size_t *image_len);
+static void *H5B2__cache_hdr_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5B2__cache_hdr_image_len(const void *thing, size_t *image_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static herr_t H5B2__cache_hdr_serialize(const H5F_t *f, void *image, size_t len,
+ void *thing);
+static herr_t H5B2__cache_hdr_free_icr(void *thing);
+
+static herr_t H5B2__cache_int_get_load_size(const void *udata, size_t *image_len);
+static void *H5B2__cache_int_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5B2__cache_int_image_len(const void *thing, size_t *image_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static herr_t H5B2__cache_int_serialize(const H5F_t *f, void *image, size_t len,
+ void *thing);
+static herr_t H5B2__cache_int_free_icr(void *thing);
+
+static herr_t H5B2__cache_leaf_get_load_size(const void *udata, size_t *image_len);
+static void *H5B2__cache_leaf_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5B2__cache_leaf_image_len(const void *thing, size_t *image_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static herr_t H5B2__cache_leaf_serialize(const H5F_t *f, void *image, size_t len,
+ void *thing);
+static herr_t H5B2__cache_leaf_free_icr(void *thing);
/*********************/
/* Package Variables */
@@ -92,35 +98,53 @@ static herr_t H5B2__cache_leaf_size(const H5F_t *f, const H5B2_leaf_t *l, size_t
/* H5B2 inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_BT2_HDR[1] = {{
- H5AC_BT2_HDR_ID,
- (H5AC_load_func_t)H5B2__cache_hdr_load,
- (H5AC_flush_func_t)H5B2__cache_hdr_flush,
- (H5AC_dest_func_t)H5B2__cache_hdr_dest,
- (H5AC_clear_func_t)H5B2__cache_hdr_clear,
- (H5AC_notify_func_t)NULL,
- (H5AC_size_func_t)H5B2__cache_hdr_size,
+ H5AC_BT2_HDR_ID, /* Metadata client ID */
+ "v2 B-tree header", /* Metadata client name (for debugging) */
+ H5FD_MEM_BTREE, /* File space memory type for client */
+ H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
+ H5B2__cache_hdr_get_load_size, /* 'get_load_size' callback */
+ H5B2__cache_hdr_deserialize, /* 'deserialize' callback */
+ H5B2__cache_hdr_image_len, /* 'image_len' callback */
+ NULL, /* 'pre_serialize' callback */
+ H5B2__cache_hdr_serialize, /* 'serialize' callback */
+ NULL, /* 'notify' callback */
+ H5B2__cache_hdr_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
/* H5B2 inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_BT2_INT[1] = {{
- H5AC_BT2_INT_ID,
- (H5AC_load_func_t)H5B2__cache_internal_load,
- (H5AC_flush_func_t)H5B2__cache_internal_flush,
- (H5AC_dest_func_t)H5B2__cache_internal_dest,
- (H5AC_clear_func_t)H5B2__cache_internal_clear,
- (H5AC_notify_func_t)NULL,
- (H5AC_size_func_t)H5B2__cache_internal_size,
+ H5AC_BT2_INT_ID, /* Metadata client ID */
+ "v2 B-tree internal node", /* Metadata client name (for debugging) */
+ H5FD_MEM_BTREE, /* File space memory type for client */
+ H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
+ H5B2__cache_int_get_load_size, /* 'get_load_size' callback */
+ H5B2__cache_int_deserialize, /* 'deserialize' callback */
+ H5B2__cache_int_image_len, /* 'image_len' callback */
+ NULL, /* 'pre_serialize' callback */
+ H5B2__cache_int_serialize, /* 'serialize' callback */
+ NULL, /* 'notify' callback */
+ H5B2__cache_int_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
/* H5B2 inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_BT2_LEAF[1] = {{
- H5AC_BT2_LEAF_ID,
- (H5AC_load_func_t)H5B2__cache_leaf_load,
- (H5AC_flush_func_t)H5B2__cache_leaf_flush,
- (H5AC_dest_func_t)H5B2__cache_leaf_dest,
- (H5AC_clear_func_t)H5B2__cache_leaf_clear,
- (H5AC_notify_func_t)NULL,
- (H5AC_size_func_t)H5B2__cache_leaf_size,
+ H5AC_BT2_LEAF_ID, /* Metadata client ID */
+ "v2 B-tree leaf node", /* Metadata client name (for debugging) */
+ H5FD_MEM_BTREE, /* File space memory type for client */
+ H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
+ H5B2__cache_leaf_get_load_size, /* 'get_load_size' callback */
+ H5B2__cache_leaf_deserialize, /* 'deserialize' callback */
+ H5B2__cache_leaf_image_len, /* 'image_len' callback */
+ NULL, /* 'pre_serialize' callback */
+ H5B2__cache_leaf_serialize, /* 'serialize' callback */
+ NULL, /* 'notify' callback */
+ H5B2__cache_leaf_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
@@ -136,7 +160,38 @@ const H5AC_class_t H5AC_BT2_LEAF[1] = {{
/*-------------------------------------------------------------------------
- * Function: H5B2__cache_hdr_load
+ * Function: H5B2__cache_hdr_get_load_size
+ *
+ * Purpose: Compute the size of the data structure on disk.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * May 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5B2__cache_hdr_get_load_size(const void *_udata, size_t *image_len)
+{
+ const H5B2_hdr_cache_ud_t *udata = (const H5B2_hdr_cache_ud_t *)_udata; /* User data for callback */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check arguments */
+ HDassert(udata);
+ HDassert(image_len);
+
+ /* Set the image length size */
+ *image_len = H5B2_HEADER_SIZE_FILE(udata->f);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5B2__cache_hdr_get_load_size() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5B2__cache_hdr_deserialize
*
* Purpose: Loads a B-tree header from the disk.
*
@@ -149,8 +204,9 @@ const H5AC_class_t H5AC_BT2_LEAF[1] = {{
*
*-------------------------------------------------------------------------
*/
-static H5B2_hdr_t *
-H5B2__cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
+static void *
+H5B2__cache_hdr_deserialize(const void *_image, size_t H5_ATTR_UNUSED len,
+ void *_udata, hbool_t H5_ATTR_UNUSED *dirty)
{
H5B2_hdr_t *hdr = NULL; /* B-tree header */
H5B2_hdr_cache_ud_t *udata = (H5B2_hdr_cache_ud_t *)_udata;
@@ -159,78 +215,59 @@ H5B2__cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
uint16_t depth; /* Depth of B-tree */
uint32_t stored_chksum; /* Stored metadata checksum value */
uint32_t computed_chksum; /* Computed metadata checksum value */
- H5WB_t *wb = NULL; /* Wrapped buffer for header data */
- uint8_t hdr_buf[H5B2_HDR_BUF_SIZE]; /* Buffer for header */
- uint8_t *buf; /* Pointer to header buffer */
- const uint8_t *p; /* Pointer into raw data buffer */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
H5B2_hdr_t *ret_value; /* Return value */
FUNC_ENTER_STATIC
/* Check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ HDassert(image);
HDassert(udata);
/* Allocate new B-tree header and reset cache info */
if(NULL == (hdr = H5B2__hdr_alloc(udata->f)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, NULL, "allocation failed for B-tree header")
- /* Wrap the local buffer for serialized header info */
- if(NULL == (wb = H5WB_wrap(hdr_buf, sizeof(hdr_buf))))
- HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, NULL, "can't wrap buffer")
-
- /* Get a pointer to a buffer that's large enough for header */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, hdr->hdr_size)))
- HGOTO_ERROR(H5E_BTREE, H5E_NOSPACE, NULL, "can't get actual buffer")
-
- /* Read header from disk */
- if(H5F_block_read(f, H5FD_MEM_BTREE, addr, hdr->hdr_size, dxpl_id, buf) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_READERROR, NULL, "can't read B-tree header")
-
- /* Get temporary pointer to serialized header */
- p = buf;
-
/* Magic number */
- if(HDmemcmp(p, H5B2_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ if(HDmemcmp(image, H5B2_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC))
HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, NULL, "wrong B-tree header signature")
- p += H5_SIZEOF_MAGIC;
+ image += H5_SIZEOF_MAGIC;
/* Version */
- if(*p++ != H5B2_HDR_VERSION)
+ if(*image++ != H5B2_HDR_VERSION)
HGOTO_ERROR(H5E_BTREE, H5E_BADRANGE, NULL, "wrong B-tree header version")
/* B-tree class */
- id = (H5B2_subid_t)*p++;
+ id = (H5B2_subid_t)*image++;
if(id >= H5B2_NUM_BTREE_ID)
HGOTO_ERROR(H5E_BTREE, H5E_BADTYPE, NULL, "incorrect B-tree type")
/* Node size (in bytes) */
- UINT32DECODE(p, cparam.node_size);
+ UINT32DECODE(image, cparam.node_size);
/* Raw key size (in bytes) */
- UINT16DECODE(p, cparam.rrec_size);
+ UINT16DECODE(image, cparam.rrec_size);
/* Depth of tree */
- UINT16DECODE(p, depth);
+ UINT16DECODE(image, depth);
/* Split & merge %s */
- cparam.split_percent = *p++;
- cparam.merge_percent = *p++;
+ cparam.split_percent = *image++;
+ cparam.merge_percent = *image++;
/* Root node pointer */
- H5F_addr_decode(udata->f, (const uint8_t **)&p, &(hdr->root.addr));
- UINT16DECODE(p, hdr->root.node_nrec);
- H5F_DECODE_LENGTH(udata->f, p, hdr->root.all_nrec);
+ H5F_addr_decode(udata->f, (const uint8_t **)&image, &(hdr->root.addr));
+ UINT16DECODE(image, hdr->root.node_nrec);
+ H5F_DECODE_LENGTH(udata->f, image, hdr->root.all_nrec);
/* Metadata checksum */
- UINT32DECODE(p, stored_chksum);
+ UINT32DECODE(image, stored_chksum);
/* Sanity check */
- HDassert((size_t)(p - (const uint8_t *)buf) == hdr->hdr_size);
+ HDassert((size_t)(image - (const uint8_t *)_image) == hdr->hdr_size);
/* Compute checksum on entire header */
- computed_chksum = H5_checksum_metadata(buf, (hdr->hdr_size - H5B2_SIZEOF_CHKSUM), 0);
+ computed_chksum = H5_checksum_metadata(_image, (hdr->hdr_size - H5B2_SIZEOF_CHKSUM), 0);
/* Verify checksum */
if(stored_chksum != computed_chksum)
@@ -242,131 +279,59 @@ H5B2__cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, NULL, "can't initialize B-tree header info")
/* Set the B-tree header's address */
- hdr->addr = addr;
+ hdr->addr = udata->addr;
+
+ /* Sanity check */
+ HDassert((size_t)(image - (const uint8_t *)_image) <= len);
/* Set return value */
ret_value = hdr;
done:
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- HDONE_ERROR(H5E_BTREE, H5E_CLOSEERROR, NULL, "can't close wrapped buffer")
if(!ret_value && hdr)
if(H5B2__hdr_free(hdr) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTRELEASE, NULL, "can't release v2 B-tree header")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2__cache_hdr_load() */ /*lint !e818 Can't make udata a pointer to const */
+} /* end H5B2__cache_hdr_deserialize() */
/*-------------------------------------------------------------------------
- * Function: H5B2__cache_hdr_flush
+ * Function: H5B2__cache_hdr_image_len
*
- * Purpose: Flushes a dirty B-tree header to disk.
+ * Purpose: Compute the size of the data structure on disk.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Feb 1 2005
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * May 20, 2010
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2__cache_hdr_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
- H5B2_hdr_t *hdr, unsigned H5_ATTR_UNUSED * flags_ptr)
+H5B2__cache_hdr_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- H5WB_t *wb = NULL; /* Wrapped buffer for header data */
- uint8_t hdr_buf[H5B2_HDR_BUF_SIZE]; /* Buffer for header */
- herr_t ret_value = SUCCEED; /* Return value */
+ const H5B2_hdr_t *hdr = (const H5B2_hdr_t *)_thing; /* Pointer to the B-tree header */
- FUNC_ENTER_STATIC
+ FUNC_ENTER_STATIC_NOERR
/* Check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
HDassert(hdr);
+ HDassert(image_len);
- if(hdr->cache_info.is_dirty) {
- uint8_t *buf; /* Pointer to header buffer */
- uint8_t *p; /* Pointer into raw data buffer */
- uint32_t metadata_chksum; /* Computed metadata checksum value */
-
- /* Set the B-tree header's file context for this operation */
- hdr->f = f;
-
- /* Wrap the local buffer for serialized header info */
- if(NULL == (wb = H5WB_wrap(hdr_buf, sizeof(hdr_buf))))
- HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "can't wrap buffer")
-
- /* Get a pointer to a buffer that's large enough for header */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, hdr->hdr_size)))
- HGOTO_ERROR(H5E_BTREE, H5E_NOSPACE, FAIL, "can't get actual buffer")
-
- /* Get temporary pointer to serialized header */
- p = buf;
-
- /* Magic number */
- HDmemcpy(p, H5B2_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC);
- p += H5_SIZEOF_MAGIC;
-
- /* Version # */
- *p++ = H5B2_HDR_VERSION;
-
- /* B-tree type */
- *p++ = hdr->cls->id;
+ /* Set the image length size */
+ *image_len = hdr->hdr_size;
- /* Node size (in bytes) */
- UINT32ENCODE(p, hdr->node_size);
-
- /* Raw key size (in bytes) */
- UINT16ENCODE(p, hdr->rrec_size);
-
- /* Depth of tree */
- UINT16ENCODE(p, hdr->depth);
-
- /* Split & merge %s */
- H5_CHECK_OVERFLOW(hdr->split_percent, /* From: */ unsigned, /* To: */ uint8_t);
- *p++ = (uint8_t)hdr->split_percent;
- H5_CHECK_OVERFLOW(hdr->merge_percent, /* From: */ unsigned, /* To: */ uint8_t);
- *p++ = (uint8_t)hdr->merge_percent;
-
- /* Root node pointer */
- H5F_addr_encode(f, &p, hdr->root.addr);
- UINT16ENCODE(p, hdr->root.node_nrec);
- H5F_ENCODE_LENGTH(f, p, hdr->root.all_nrec);
-
- /* Compute metadata checksum */
- metadata_chksum = H5_checksum_metadata(buf, (hdr->hdr_size - H5B2_SIZEOF_CHKSUM), 0);
-
- /* Metadata checksum */
- UINT32ENCODE(p, metadata_chksum);
-
- /* Write the B-tree header. */
- HDassert((size_t)(p - buf) == hdr->hdr_size);
- if(H5F_block_write(f, H5FD_MEM_BTREE, addr, hdr->hdr_size, dxpl_id, buf) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTFLUSH, FAIL, "unable to save B-tree header to disk")
-
- hdr->cache_info.is_dirty = FALSE;
- } /* end if */
-
- if(destroy)
- if(H5B2__cache_hdr_dest(f, hdr) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to destroy B-tree header")
-
-done:
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- HDONE_ERROR(H5E_BTREE, H5E_CLOSEERROR, FAIL, "can't close wrapped buffer")
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5B2__cache_hdr_flush() */
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5B2__cache_hdr_image_len() */
/*-------------------------------------------------------------------------
- * Function: H5B2__cache_hdr_dest
+ * Function: H5B2__cache_hdr_serialize
*
- * Purpose: Destroys a B-tree header in memory.
+ * Purpose: Flushes a dirty B-tree header to disk.
*
* Return: Non-negative on success/Negative on failure
*
@@ -377,109 +342,132 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2__cache_hdr_dest(H5F_t *f, H5B2_hdr_t *hdr)
+H5B2__cache_hdr_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNUSED len,
+ void *_thing)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ H5B2_hdr_t *hdr = (H5B2_hdr_t *)_thing; /* Pointer to the B-tree header */
+ uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t metadata_chksum; /* Computed metadata checksum value */
- FUNC_ENTER_STATIC
+ FUNC_ENTER_STATIC_NOERR
- /* Check arguments */
+ /* check arguments */
+ HDassert(f);
+ HDassert(image);
HDassert(hdr);
- HDassert(hdr->rc == 0);
- /* If we're going to free the space on disk, the address must be valid */
- HDassert(!hdr->cache_info.free_file_space_on_destroy || H5F_addr_defined(hdr->cache_info.addr));
+ /* Magic number */
+ HDmemcpy(image, H5B2_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC);
+ image += H5_SIZEOF_MAGIC;
+
+ /* Version # */
+ *image++ = H5B2_HDR_VERSION;
- /* Check for freeing file space for B-tree header */
- if(hdr->cache_info.free_file_space_on_destroy) {
- /* Release the space on disk */
- /* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_BTREE, H5AC_dxpl_id, hdr->cache_info.addr, (hsize_t)hdr->hdr_size) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to free v2 B-tree header")
- } /* end if */
+ /* B-tree type */
+ *image++ = hdr->cls->id;
- /* Release B-tree header info */
- if(H5B2__hdr_free(hdr) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to free v2 B-tree header info")
+ /* Node size (in bytes) */
+ UINT32ENCODE(image, hdr->node_size);
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2__cache_hdr_dest() */
+ /* Raw key size (in bytes) */
+ UINT16ENCODE(image, hdr->rrec_size);
+
+ /* Depth of tree */
+ UINT16ENCODE(image, hdr->depth);
+
+ /* Split & merge %s */
+ H5_CHECK_OVERFLOW(hdr->split_percent, /* From: */ unsigned, /* To: */ uint8_t);
+ *image++ = (uint8_t)hdr->split_percent;
+ H5_CHECK_OVERFLOW(hdr->merge_percent, /* From: */ unsigned, /* To: */ uint8_t);
+ *image++ = (uint8_t)hdr->merge_percent;
+
+ /* Root node pointer */
+ H5F_addr_encode(f, &image, hdr->root.addr);
+ UINT16ENCODE(image, hdr->root.node_nrec);
+ H5F_ENCODE_LENGTH(f, image, hdr->root.all_nrec);
+
+ /* Compute metadata checksum */
+ metadata_chksum = H5_checksum_metadata(_image, (hdr->hdr_size - H5B2_SIZEOF_CHKSUM), 0);
+
+ /* Metadata checksum */
+ UINT32ENCODE(image, metadata_chksum);
+
+ /* Sanity check */
+ HDassert((size_t)(image - (uint8_t *)_image) <= len);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5B2__cache_hdr_serialize() */
/*-------------------------------------------------------------------------
- * Function: H5B2__cache_hdr_clear
+ * Function: H5B2__cache_hdr_free_icr
*
- * Purpose: Mark a B-tree header in memory as non-dirty.
+ * Purpose: Destroy/release an "in core representation" of a data
+ * structure
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Feb 1 2005
+ * Programmer: Mike McGreevy
+ * mcgreevy@hdfgroup.org
+ * June 18, 2008
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2__cache_hdr_clear(H5F_t *f, H5B2_hdr_t *hdr, hbool_t destroy)
+H5B2__cache_hdr_free_icr(void *thing)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
- /*
- * Check arguments.
- */
- HDassert(hdr);
-
- /* Reset the dirty flag. */
- hdr->cache_info.is_dirty = FALSE;
+ /* Check arguments */
+ HDassert(thing);
- if(destroy)
- if(H5B2__cache_hdr_dest(f, hdr) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to destroy B-tree header")
+ /* Destroy v2 B-tree header */
+ if(H5B2__hdr_free((H5B2_hdr_t *)thing) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to free v2 B-tree header")
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2__cache_hdr_clear() */
+} /* H5B2__cache_hdr_free_icr() */
/*-------------------------------------------------------------------------
- * Function: H5B2__cache_hdr_size
+ * Function: H5B2__cache_int_get_load_size
*
- * Purpose: Compute the size in bytes of a B-tree header
- * on disk, and return it in *size_ptr. On failure,
- * the value of *size_ptr is undefined.
+ * Purpose: Compute the size of the data structure on disk.
*
- * Return: SUCCEED (Can't fail)
+ * Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
* koziol@hdfgroup.org
- * Feb 1 2005
+ * May 18, 2010
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2__cache_hdr_size(const H5F_t H5_ATTR_UNUSED *f, const H5B2_hdr_t *hdr, size_t *size_ptr)
+H5B2__cache_int_get_load_size(const void *_udata, size_t *image_len)
{
+ const H5B2_internal_cache_ud_t *udata = (const H5B2_internal_cache_ud_t *)_udata; /* User data for callback */
+
FUNC_ENTER_STATIC_NOERR
/* Check arguments */
- HDassert(f);
- HDassert(hdr);
- HDassert(size_ptr);
+ HDassert(udata);
+ HDassert(udata->hdr);
+ HDassert(image_len);
- /* Set size value */
- *size_ptr = hdr->hdr_size;
+ /* Set the image length size */
+ *image_len = udata->hdr->node_size;
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5B2__cache_hdr_size() */
+} /* end H5B2__cache_int_get_load_size() */
/*-------------------------------------------------------------------------
- * Function: H5B2__cache_internal_load
+ * Function: H5B2__cache_int_deserialize
*
- * Purpose: Loads a B-tree internal node from the disk.
+ * Purpose: Deserialize a B-tree internal node from the disk.
*
* Return: Success: Pointer to a new B-tree internal node.
* Failure: NULL
@@ -490,12 +478,13 @@ H5B2__cache_hdr_size(const H5F_t H5_ATTR_UNUSED *f, const H5B2_hdr_t *hdr, size_
*
*-------------------------------------------------------------------------
*/
-static H5B2_internal_t *
-H5B2__cache_internal_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
+static void *
+H5B2__cache_int_deserialize(const void *_image, size_t H5_ATTR_UNUSED len,
+ void *_udata, hbool_t H5_ATTR_UNUSED *dirty)
{
H5B2_internal_cache_ud_t *udata = (H5B2_internal_cache_ud_t *)_udata; /* Pointer to user data */
H5B2_internal_t *internal = NULL; /* Internal node read */
- const uint8_t *p; /* Pointer into raw data buffer */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
uint8_t *native; /* Pointer to native record info */
H5B2_node_ptr_t *int_node_ptr; /* Pointer to node pointer info */
uint32_t stored_chksum; /* Stored metadata checksum value */
@@ -506,8 +495,7 @@ H5B2__cache_internal_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
FUNC_ENTER_STATIC
/* Check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ HDassert(image);
HDassert(udata);
/* Allocate new internal node and reset cache info */
@@ -515,9 +503,6 @@ H5B2__cache_internal_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
HDmemset(&internal->cache_info, 0, sizeof(H5AC_info_t));
- /* Set the B-tree header's file context for this operation */
- udata->hdr->f = f;
-
/* Increment ref. count on B-tree header */
if(H5B2__hdr_incr(udata->hdr) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINC, NULL, "can't increment ref. count on B-tree header")
@@ -525,23 +510,17 @@ H5B2__cache_internal_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Share B-tree information */
internal->hdr = udata->hdr;
- /* Read header from disk */
- if(H5F_block_read(f, H5FD_MEM_BTREE, addr, udata->hdr->node_size, dxpl_id, udata->hdr->page) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_READERROR, NULL, "can't read B-tree internal node")
-
- p = udata->hdr->page;
-
/* Magic number */
- if(HDmemcmp(p, H5B2_INT_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ if(HDmemcmp(image, H5B2_INT_MAGIC, (size_t)H5_SIZEOF_MAGIC))
HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, NULL, "wrong B-tree internal node signature")
- p += H5_SIZEOF_MAGIC;
+ image += H5_SIZEOF_MAGIC;
/* Version */
- if(*p++ != H5B2_INT_VERSION)
+ if(*image++ != H5B2_INT_VERSION)
HGOTO_ERROR(H5E_BTREE, H5E_BADRANGE, NULL, "wrong B-tree internal node version")
/* B-tree type */
- if(*p++ != (uint8_t)udata->hdr->cls->id)
+ if(*image++ != (uint8_t)udata->hdr->cls->id)
HGOTO_ERROR(H5E_BTREE, H5E_BADTYPE, NULL, "incorrect B-tree type")
/* Allocate space for the native keys in memory */
@@ -560,11 +539,11 @@ H5B2__cache_internal_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
native = internal->int_native;
for(u = 0; u < internal->nrec; u++) {
/* Decode record */
- if((udata->hdr->cls->decode)(p, native, udata->hdr->cb_ctx) < 0)
+ if((udata->hdr->cls->decode)(image, native, udata->hdr->cb_ctx) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDECODE, NULL, "unable to decode B-tree record")
/* Move to next record */
- p += udata->hdr->rrec_size;
+ image += udata->hdr->rrec_size;
native += udata->hdr->cls->nrec_size;
} /* end for */
@@ -572,10 +551,10 @@ H5B2__cache_internal_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
int_node_ptr = internal->node_ptrs;
for(u = 0; u < (unsigned)(internal->nrec + 1); u++) {
/* Decode node pointer */
- H5F_addr_decode(udata->f, (const uint8_t **)&p, &(int_node_ptr->addr));
- UINT64DECODE_VAR(p, int_node_ptr->node_nrec, udata->hdr->max_nrec_size);
+ H5F_addr_decode(udata->f, (const uint8_t **)&image, &(int_node_ptr->addr));
+ UINT64DECODE_VAR(image, int_node_ptr->node_nrec, udata->hdr->max_nrec_size);
if(udata->depth > 1)
- UINT64DECODE_VAR(p, int_node_ptr->all_nrec, udata->hdr->node_info[udata->depth - 1].cum_max_nrec_size)
+ UINT64DECODE_VAR(image, int_node_ptr->all_nrec, udata->hdr->node_info[udata->depth - 1].cum_max_nrec_size)
else
int_node_ptr->all_nrec = int_node_ptr->node_nrec;
@@ -584,13 +563,13 @@ H5B2__cache_internal_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
} /* end for */
/* Compute checksum on internal node */
- computed_chksum = H5_checksum_metadata(udata->hdr->page, (size_t)(p - (const uint8_t *)udata->hdr->page), 0);
+ computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
/* Metadata checksum */
- UINT32DECODE(p, stored_chksum);
+ UINT32DECODE(image, stored_chksum);
/* Sanity check parsing */
- HDassert((size_t)(p - (const uint8_t *)udata->hdr->page) <= udata->hdr->node_size);
+ HDassert((size_t)(image - (const uint8_t *)_image) <= len);
/* Verify checksum */
if(stored_chksum != computed_chksum)
@@ -605,224 +584,194 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, NULL, "unable to destroy B-tree internal node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5B2__cache_internal_load() */ /*lint !e818 Can't make udata a pointer to const */
+} /* H5B2__cache_int_deserialize() */
/*-------------------------------------------------------------------------
- * Function: H5B2__cache_internal_flush
+ * Function: H5B2__cache_int_image_len
*
- * Purpose: Flushes a dirty B-tree internal node to disk.
+ * Purpose: Compute the size of the data structure on disk.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Feb 3 2005
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * May 20, 2010
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2__cache_internal_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5B2_internal_t *internal, unsigned H5_ATTR_UNUSED * flags_ptr)
+H5B2__cache_int_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ const H5B2_internal_t *internal = (const H5B2_internal_t *)_thing; /* Pointer to the B-tree internal node */
- FUNC_ENTER_STATIC
+ FUNC_ENTER_STATIC_NOERR
/* Check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
HDassert(internal);
HDassert(internal->hdr);
+ HDassert(image_len);
- if(internal->cache_info.is_dirty) {
- uint8_t *p; /* Pointer into raw data buffer */
- uint8_t *native; /* Pointer to native record info */
- H5B2_node_ptr_t *int_node_ptr; /* Pointer to node pointer info */
- uint32_t metadata_chksum; /* Computed metadata checksum value */
- unsigned u; /* Local index variable */
-
- /* Set the B-tree header's file context for this operation */
- internal->hdr->f = f;
-
- p = internal->hdr->page;
-
- /* Magic number */
- HDmemcpy(p, H5B2_INT_MAGIC, (size_t)H5_SIZEOF_MAGIC);
- p += H5_SIZEOF_MAGIC;
-
- /* Version # */
- *p++ = H5B2_INT_VERSION;
-
- /* B-tree type */
- *p++ = internal->hdr->cls->id;
- HDassert((size_t)(p - internal->hdr->page) == (H5B2_INT_PREFIX_SIZE - H5B2_SIZEOF_CHKSUM));
-
- /* Serialize records for internal node */
- native = internal->int_native;
- for(u = 0; u < internal->nrec; u++) {
- /* Encode record */
- if((internal->hdr->cls->encode)(p, native, internal->hdr->cb_ctx) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTENCODE, FAIL, "unable to encode B-tree record")
-
- /* Move to next record */
- p += internal->hdr->rrec_size;
- native += internal->hdr->cls->nrec_size;
- } /* end for */
-
- /* Serialize node pointers for internal node */
- int_node_ptr = internal->node_ptrs;
- for(u = 0; u < (unsigned)(internal->nrec + 1); u++) {
- /* Encode node pointer */
- H5F_addr_encode(f, &p, int_node_ptr->addr);
- UINT64ENCODE_VAR(p, int_node_ptr->node_nrec, internal->hdr->max_nrec_size);
- if(internal->depth > 1)
- UINT64ENCODE_VAR(p, int_node_ptr->all_nrec, internal->hdr->node_info[internal->depth - 1].cum_max_nrec_size);
-
- /* Move to next node pointer */
- int_node_ptr++;
- } /* end for */
-
- /* Compute metadata checksum */
- metadata_chksum = H5_checksum_metadata(internal->hdr->page, (size_t)(p - internal->hdr->page), 0);
-
- /* Metadata checksum */
- UINT32ENCODE(p, metadata_chksum);
-
- /* Write the B-tree internal node */
- HDassert((size_t)(p - internal->hdr->page) <= internal->hdr->node_size);
- if(H5F_block_write(f, H5FD_MEM_BTREE, addr, internal->hdr->node_size, dxpl_id, internal->hdr->page) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTFLUSH, FAIL, "unable to save B-tree internal node to disk")
+ /* Set the image length size */
+ *image_len = internal->hdr->node_size;
- internal->cache_info.is_dirty = FALSE;
- } /* end if */
-
- if(destroy)
- if(H5B2__cache_internal_dest(f, internal) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to destroy B-tree internal node")
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5B2__cache_internal_flush() */
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5B2__cache_int_image_len() */
/*-------------------------------------------------------------------------
- * Function: H5B2__cache_internal_dest
+ * Function: H5B2__cache_int_serialize
*
- * Purpose: Destroys a B-tree internal node in memory.
+ * Purpose: Serializes a B-tree internal node for writing to disk.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
* koziol@ncsa.uiuc.edu
- * Feb 2 2005
+ * Feb 3 2005
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2__cache_internal_dest(H5F_t *f, H5B2_internal_t *internal)
+H5B2__cache_int_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNUSED len,
+ void *_thing)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ H5B2_internal_t *internal = (H5B2_internal_t *)_thing; /* Pointer to the B-tree internal node */
+ uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */
+ uint8_t *native; /* Pointer to native record info */
+ H5B2_node_ptr_t *int_node_ptr; /* Pointer to node pointer info */
+ uint32_t metadata_chksum; /* Computed metadata checksum value */
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
- /* Check arguments */
+ /* check arguments */
HDassert(f);
+ HDassert(image);
HDassert(internal);
HDassert(internal->hdr);
- /* If we're going to free the space on disk, the address must be valid */
- HDassert(!internal->cache_info.free_file_space_on_destroy || H5F_addr_defined(internal->cache_info.addr));
+ /* Magic number */
+ HDmemcpy(image, H5B2_INT_MAGIC, (size_t)H5_SIZEOF_MAGIC);
+ image += H5_SIZEOF_MAGIC;
+
+ /* Version # */
+ *image++ = H5B2_INT_VERSION;
- /* Check for freeing file space for B-tree internal node */
- if(internal->cache_info.free_file_space_on_destroy) {
- /* Release the space on disk */
- /* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_BTREE, H5AC_dxpl_id, internal->cache_info.addr, (hsize_t)internal->hdr->node_size) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to free v2 B-tree internal node")
- } /* end if */
+ /* B-tree type */
+ *image++ = internal->hdr->cls->id;
+ HDassert((size_t)(image - (uint8_t *)_image) == (H5B2_INT_PREFIX_SIZE - H5B2_SIZEOF_CHKSUM));
- /* Release v2 b-tree internal node */
- if(H5B2__internal_free(internal) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to release v2 B-tree internal node")
+ /* Serialize records for internal node */
+ native = internal->int_native;
+ for(u = 0; u < internal->nrec; u++) {
+ /* Encode record */
+ if((internal->hdr->cls->encode)(image, native, internal->hdr->cb_ctx) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTENCODE, FAIL, "unable to encode B-tree record")
+
+ /* Move to next record */
+ image += internal->hdr->rrec_size;
+ native += internal->hdr->cls->nrec_size;
+ } /* end for */
+
+ /* Serialize node pointers for internal node */
+ int_node_ptr = internal->node_ptrs;
+ for(u = 0; u < (unsigned)(internal->nrec + 1); u++) {
+ /* Encode node pointer */
+ H5F_addr_encode(f, &image, int_node_ptr->addr);
+ UINT64ENCODE_VAR(image, int_node_ptr->node_nrec, internal->hdr->max_nrec_size);
+ if(internal->depth > 1)
+ UINT64ENCODE_VAR(image, int_node_ptr->all_nrec, internal->hdr->node_info[internal->depth - 1].cum_max_nrec_size);
+
+ /* Move to next node pointer */
+ int_node_ptr++;
+ } /* end for */
+
+ /* Compute metadata checksum */
+ metadata_chksum = H5_checksum_metadata(_image, (size_t)(image - (uint8_t *)_image), 0);
+
+ /* Metadata checksum */
+ UINT32ENCODE(image, metadata_chksum);
+
+ /* Sanity check */
+ HDassert((size_t)(image - (uint8_t *)_image) <= len);
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2__cache_internal_dest() */
+} /* H5B2__cache_int_serialize() */
/*-------------------------------------------------------------------------
- * Function: H5B2__cache_internal_clear
+ * Function: H5B2__cache_int_free_icr
*
- * Purpose: Mark a B-tree internal node in memory as non-dirty.
+ * Purpose: Destroy/release an "in core representation" of a data
+ * structure
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Feb 2 2005
+ * Programmer: Mike McGreevy
+ * mcgreevy@hdfgroup.org
+ * June 18, 2008
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2__cache_internal_clear(H5F_t *f, H5B2_internal_t *internal, hbool_t destroy)
+H5B2__cache_int_free_icr(void *thing)
{
- herr_t ret_value = SUCCEED;
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
- /*
- * Check arguments.
- */
- HDassert(internal);
-
- /* Reset the dirty flag. */
- internal->cache_info.is_dirty = FALSE;
+ /* Check arguments */
+ HDassert(thing);
- if(destroy)
- if(H5B2__cache_internal_dest(f, internal) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to destroy B-tree internal node")
+ /* Release v2 B-tree internal node */
+ if(H5B2__internal_free((H5B2_internal_t *)thing) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to release v2 B-tree internal node")
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2__cache_internal_clear() */
+} /* H5B2__cache_int_free_icr() */
/*-------------------------------------------------------------------------
- * Function: H5B2__cache_internal_size
+ * Function: H5B2__cache_leaf_get_load_size
*
- * Purpose: Compute the size in bytes of a B-tree internal node
- * on disk, and return it in *size_ptr. On failure,
- * the value of *size_ptr is undefined.
+ * Purpose: Compute the size of the data structure on disk.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Feb 2 2005
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * May 18, 2010
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2__cache_internal_size(const H5F_t H5_ATTR_UNUSED *f, const H5B2_internal_t *internal, size_t *size_ptr)
+H5B2__cache_leaf_get_load_size(const void *_udata, size_t *image_len)
{
+ const H5B2_leaf_cache_ud_t *udata = (const H5B2_leaf_cache_ud_t *)_udata; /* User data for callback */
+
FUNC_ENTER_STATIC_NOERR
/* Check arguments */
- HDassert(internal);
- HDassert(internal->hdr);
- HDassert(size_ptr);
+ HDassert(udata);
+ HDassert(udata->hdr);
+ HDassert(image_len);
- /* Set size value */
- *size_ptr = internal->hdr->node_size;
+ /* Set the image length size */
+ *image_len = udata->hdr->node_size;
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5B2__cache_internal_size() */
+} /* end H5B2__cache_leaf_get_load_size() */
/*-------------------------------------------------------------------------
- * Function: H5B2__cache_leaf_load
+ * Function: H5B2__cache_leaf_deserialize
*
- * Purpose: Loads a B-tree leaf from the disk.
+ * Purpose: Deserialize a B-tree leaf from the disk.
*
* Return: Success: Pointer to a new B-tree leaf node.
* Failure: NULL
@@ -833,12 +782,13 @@ H5B2__cache_internal_size(const H5F_t H5_ATTR_UNUSED *f, const H5B2_internal_t *
*
*-------------------------------------------------------------------------
*/
-static H5B2_leaf_t *
-H5B2__cache_leaf_load(H5F_t H5_ATTR_UNUSED *f, hid_t dxpl_id, haddr_t addr, void *_udata)
+static void *
+H5B2__cache_leaf_deserialize(const void *_image, size_t H5_ATTR_UNUSED len,
+ void *_udata, hbool_t H5_ATTR_UNUSED *dirty)
{
H5B2_leaf_cache_ud_t *udata = (H5B2_leaf_cache_ud_t *)_udata;
    H5B2_leaf_t *leaf = NULL; /* Pointer to leaf node loaded */
- const uint8_t *p; /* Pointer into raw data buffer */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
uint8_t *native; /* Pointer to native keys */
uint32_t stored_chksum; /* Stored metadata checksum value */
uint32_t computed_chksum; /* Computed metadata checksum value */
@@ -848,8 +798,7 @@ H5B2__cache_leaf_load(H5F_t H5_ATTR_UNUSED *f, hid_t dxpl_id, haddr_t addr, void
FUNC_ENTER_STATIC
/* Check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ HDassert(image);
HDassert(udata);
/* Allocate new leaf node and reset cache info */
@@ -857,9 +806,6 @@ H5B2__cache_leaf_load(H5F_t H5_ATTR_UNUSED *f, hid_t dxpl_id, haddr_t addr, void
HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, NULL, "memory allocation failed")
HDmemset(&leaf->cache_info, 0, sizeof(H5AC_info_t));
- /* Set the B-tree header's file context for this operation */
- udata->hdr->f = udata->f;
-
/* Increment ref. count on B-tree header */
if(H5B2__hdr_incr(udata->hdr) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINC, NULL, "can't increment ref. count on B-tree header")
@@ -867,23 +813,17 @@ H5B2__cache_leaf_load(H5F_t H5_ATTR_UNUSED *f, hid_t dxpl_id, haddr_t addr, void
/* Share B-tree header information */
leaf->hdr = udata->hdr;
- /* Read header from disk */
- if(H5F_block_read(udata->f, H5FD_MEM_BTREE, addr, udata->hdr->node_size, dxpl_id, udata->hdr->page) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_READERROR, NULL, "can't read B-tree leaf node")
-
- p = udata->hdr->page;
-
/* Magic number */
- if(HDmemcmp(p, H5B2_LEAF_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ if(HDmemcmp(image, H5B2_LEAF_MAGIC, (size_t)H5_SIZEOF_MAGIC))
HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, NULL, "wrong B-tree leaf node signature")
- p += H5_SIZEOF_MAGIC;
+ image += H5_SIZEOF_MAGIC;
/* Version */
- if(*p++ != H5B2_LEAF_VERSION)
+ if(*image++ != H5B2_LEAF_VERSION)
HGOTO_ERROR(H5E_BTREE, H5E_BADRANGE, NULL, "wrong B-tree leaf node version")
/* B-tree type */
- if(*p++ != (uint8_t)udata->hdr->cls->id)
+ if(*image++ != (uint8_t)udata->hdr->cls->id)
HGOTO_ERROR(H5E_BTREE, H5E_BADTYPE, NULL, "incorrect B-tree type")
/* Allocate space for the native keys in memory */
@@ -897,27 +837,30 @@ H5B2__cache_leaf_load(H5F_t H5_ATTR_UNUSED *f, hid_t dxpl_id, haddr_t addr, void
native = leaf->leaf_native;
for(u = 0; u < leaf->nrec; u++) {
/* Decode record */
- if((udata->hdr->cls->decode)(p, native, udata->hdr->cb_ctx) < 0)
+ if((udata->hdr->cls->decode)(image, native, udata->hdr->cb_ctx) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTENCODE, NULL, "unable to decode B-tree record")
/* Move to next record */
- p += udata->hdr->rrec_size;
+ image += udata->hdr->rrec_size;
native += udata->hdr->cls->nrec_size;
} /* end for */
/* Compute checksum on leaf node */
- computed_chksum = H5_checksum_metadata(udata->hdr->page, (size_t)(p - (const uint8_t *)udata->hdr->page), 0);
+ computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
/* Metadata checksum */
- UINT32DECODE(p, stored_chksum);
+ UINT32DECODE(image, stored_chksum);
/* Sanity check parsing */
- HDassert((size_t)(p - (const uint8_t *)udata->hdr->page) <= udata->hdr->node_size);
+ HDassert((size_t)(image - (const uint8_t *)_image) <= udata->hdr->node_size);
/* Verify checksum */
if(stored_chksum != computed_chksum)
HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, NULL, "incorrect metadata checksum for v2 leaf node")
+ /* Sanity check */
+ HDassert((size_t)(image - (const uint8_t *)_image) <= len);
+
/* Set return value */
ret_value = leaf;
@@ -927,96 +870,46 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, NULL, "unable to destroy B-tree leaf node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5B2__cache_leaf_load() */ /*lint !e818 Can't make udata a pointer to const */
+} /* H5B2__cache_leaf_deserialize() */
/*-------------------------------------------------------------------------
- * Function: H5B2__cache_leaf_flush
+ * Function: H5B2__cache_leaf_image_len
*
- * Purpose: Flushes a dirty B-tree leaf node to disk.
+ * Purpose: Compute the size of the data structure on disk.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Feb 2 2005
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * May 20, 2010
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2__cache_leaf_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5B2_leaf_t *leaf, unsigned H5_ATTR_UNUSED * flags_ptr)
+H5B2__cache_leaf_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ const H5B2_leaf_t *leaf = (const H5B2_leaf_t *)_thing; /* Pointer to the B-tree leaf node */
- FUNC_ENTER_STATIC
+ FUNC_ENTER_STATIC_NOERR
/* Check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
HDassert(leaf);
HDassert(leaf->hdr);
+ HDassert(image_len);
- if(leaf->cache_info.is_dirty) {
- uint8_t *p; /* Pointer into raw data buffer */
- uint8_t *native; /* Pointer to native keys */
- uint32_t metadata_chksum; /* Computed metadata checksum value */
- unsigned u; /* Local index variable */
-
- /* Set the B-tree header's file context for this operation */
- leaf->hdr->f = f;
-
- p = leaf->hdr->page;
-
- /* magic number */
- HDmemcpy(p, H5B2_LEAF_MAGIC, (size_t)H5_SIZEOF_MAGIC);
- p += H5_SIZEOF_MAGIC;
-
- /* version # */
- *p++ = H5B2_LEAF_VERSION;
-
- /* b-tree type */
- *p++ = leaf->hdr->cls->id;
- HDassert((size_t)(p - leaf->hdr->page) == (H5B2_LEAF_PREFIX_SIZE - H5B2_SIZEOF_CHKSUM));
+ /* Set the image length size */
+ *image_len = leaf->hdr->node_size;
- /* Serialize records for leaf node */
- native = leaf->leaf_native;
- for(u = 0; u < leaf->nrec; u++) {
- /* Encode record */
- if((leaf->hdr->cls->encode)(p, native, leaf->hdr->cb_ctx) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTENCODE, FAIL, "unable to encode B-tree record")
-
- /* Move to next record */
- p += leaf->hdr->rrec_size;
- native += leaf->hdr->cls->nrec_size;
- } /* end for */
-
- /* Compute metadata checksum */
- metadata_chksum = H5_checksum_metadata(leaf->hdr->page, (size_t)(p - leaf->hdr->page), 0);
-
- /* Metadata checksum */
- UINT32ENCODE(p, metadata_chksum);
-
- /* Write the B-tree leaf node */
- HDassert((size_t)(p - leaf->hdr->page) <= leaf->hdr->node_size);
- if(H5F_block_write(f, H5FD_MEM_BTREE, addr, leaf->hdr->node_size, dxpl_id, leaf->hdr->page) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTFLUSH, FAIL, "unable to save B-tree leaf node to disk")
-
- leaf->cache_info.is_dirty = FALSE;
- } /* end if */
-
- if(destroy)
- if(H5B2__cache_leaf_dest(f, leaf) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to destroy B-tree leaf node")
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5B2__cache_leaf_flush() */
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5B2__cache_leaf_image_len() */
/*-------------------------------------------------------------------------
- * Function: H5B2__cache_leaf_dest
+ * Function: H5B2__cache_leaf_serialize
*
- * Purpose: Destroys a B-tree leaf node in memory.
+ * Purpose: Serializes a B-tree leaf node for writing to disk.
*
* Return: Non-negative on success/Negative on failure
*
@@ -1027,102 +920,90 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2__cache_leaf_dest(H5F_t *f, H5B2_leaf_t *leaf)
+H5B2__cache_leaf_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNUSED len,
+ void *_thing)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ H5B2_leaf_t *leaf = (H5B2_leaf_t *)_thing; /* Pointer to the B-tree leaf node */
+ uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */
+ uint8_t *native; /* Pointer to native keys */
+ uint32_t metadata_chksum; /* Computed metadata checksum value */
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
- /* Check arguments */
+ /* check arguments */
HDassert(f);
+ HDassert(image);
HDassert(leaf);
HDassert(leaf->hdr);
- /* If we're going to free the space on disk, the address must be valid */
- HDassert(!leaf->cache_info.free_file_space_on_destroy || H5F_addr_defined(leaf->cache_info.addr));
-
- /* Check for freeing file space for B-tree leaf node */
- if(leaf->cache_info.free_file_space_on_destroy) {
- /* Release the space on disk */
- /* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_BTREE, H5AC_dxpl_id, leaf->cache_info.addr, (hsize_t)leaf->hdr->node_size) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to free v2 B-tree leaf node")
- } /* end if */
+ /* magic number */
+ HDmemcpy(image, H5B2_LEAF_MAGIC, (size_t)H5_SIZEOF_MAGIC);
+ image += H5_SIZEOF_MAGIC;
- /* Destroy v2 b-tree leaf node */
- if(H5B2__leaf_free(leaf) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to destroy B-tree leaf node")
+ /* version # */
+ *image++ = H5B2_LEAF_VERSION;
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2__cache_leaf_dest() */
+ /* B-tree type */
+ *image++ = leaf->hdr->cls->id;
+ HDassert((size_t)(image - (uint8_t *)_image) == (H5B2_LEAF_PREFIX_SIZE - H5B2_SIZEOF_CHKSUM));
-
-/*-------------------------------------------------------------------------
- * Function: H5B2__cache_leaf_clear
- *
- * Purpose: Mark a B-tree leaf node in memory as non-dirty.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Feb 2 2005
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5B2__cache_leaf_clear(H5F_t *f, H5B2_leaf_t *leaf, hbool_t destroy)
-{
- herr_t ret_value = SUCCEED;
+ /* Serialize records for leaf node */
+ native = leaf->leaf_native;
+ for(u = 0; u < leaf->nrec; u++) {
+ /* Encode record */
+ if((leaf->hdr->cls->encode)(image, native, leaf->hdr->cb_ctx) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTENCODE, FAIL, "unable to encode B-tree record")
- FUNC_ENTER_STATIC
+ /* Move to next record */
+ image += leaf->hdr->rrec_size;
+ native += leaf->hdr->cls->nrec_size;
+ } /* end for */
- /*
- * Check arguments.
- */
- HDassert(leaf);
+ /* Compute metadata checksum */
+ metadata_chksum = H5_checksum_metadata(_image, (size_t)((const uint8_t *)image - (const uint8_t *)_image), 0);
- /* Reset the dirty flag. */
- leaf->cache_info.is_dirty = FALSE;
+ /* Metadata checksum */
+ UINT32ENCODE(image, metadata_chksum);
- if(destroy)
- if(H5B2__cache_leaf_dest(f, leaf) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to destroy B-tree leaf node")
+ /* Sanity check */
+ HDassert((size_t)(image - (uint8_t *)_image) <= len);
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2__cache_leaf_clear() */
+} /* H5B2__cache_leaf_serialize() */
/*-------------------------------------------------------------------------
- * Function: H5B2__cache_leaf_size
+ * Function: H5B2__cache_leaf_free_icr
*
- * Purpose: Compute the size in bytes of a B-tree leaf node
- * on disk, and return it in *size_ptr. On failure,
- * the value of *size_ptr is undefined.
+ * Purpose: Destroy/release an "in core representation" of a data
+ * structure
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Feb 2 2005
+ * Programmer: Mike McGreevy
+ * mcgreevy@hdfgroup.org
+ * June 18, 2008
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2__cache_leaf_size(const H5F_t H5_ATTR_UNUSED *f, const H5B2_leaf_t *leaf, size_t *size_ptr)
+H5B2__cache_leaf_free_icr(void *thing)
{
- FUNC_ENTER_STATIC_NOERR
+ herr_t ret_value = SUCCEED; /* Return value */
- /* check arguments */
- HDassert(leaf);
- HDassert(leaf->hdr);
- HDassert(size_ptr);
+ FUNC_ENTER_STATIC
- /* Set size value */
- *size_ptr = leaf->hdr->node_size;
+ /* Check arguments */
+ HDassert(thing);
- FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5B2__cache_leaf_size() */
+ /* Destroy v2 B-tree leaf node */
+ if(H5B2__leaf_free((H5B2_leaf_t *)thing) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to destroy B-tree leaf node")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5B2__cache_leaf_free_icr() */
diff --git a/src/H5B2dbg.c b/src/H5B2dbg.c
index 50283f8..6e1250b 100644
--- a/src/H5B2dbg.c
+++ b/src/H5B2dbg.c
@@ -123,8 +123,9 @@ H5B2__hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
* Load the B-tree header.
*/
cache_udata.f = f;
+ cache_udata.addr = addr;
cache_udata.ctx_udata = dbg_ctx;
- if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, addr, &cache_udata, H5AC_READ)))
+ if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, FAIL, "unable to load B-tree header")
/* Set file pointer for this B-tree operation */
@@ -241,8 +242,9 @@ H5B2__int_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
* Load the B-tree header.
*/
cache_udata.f = f;
+ cache_udata.addr = hdr_addr;
cache_udata.ctx_udata = dbg_ctx;
- if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, hdr_addr, &cache_udata, H5AC_READ)))
+ if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, hdr_addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, FAIL, "unable to load B-tree header")
/* Set file pointer for this B-tree operation */
@@ -253,7 +255,7 @@ H5B2__int_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
*/
H5_CHECK_OVERFLOW(nrec, unsigned, uint16_t);
H5_CHECK_OVERFLOW(depth, unsigned, uint16_t);
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, addr, (uint16_t)nrec, (uint16_t)depth, H5AC_READ)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, addr, (uint16_t)nrec, (uint16_t)depth, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, FAIL, "unable to load B-tree internal node")
/* Print opening message */
@@ -375,8 +377,9 @@ H5B2__leaf_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent
* Load the B-tree header.
*/
cache_udata.f = f;
+ cache_udata.addr = hdr_addr;
cache_udata.ctx_udata = dbg_ctx;
- if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, hdr_addr, &cache_udata, H5AC_READ)))
+ if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, hdr_addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree header")
/* Set file pointer for this B-tree operation */
@@ -386,7 +389,7 @@ H5B2__leaf_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent
* Load the B-tree leaf node
*/
H5_CHECK_OVERFLOW(nrec, unsigned, uint16_t);
- if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, addr, (uint16_t)nrec, H5AC_READ)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, addr, (uint16_t)nrec, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Print opening message */
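The debugging routines above show the second half of the conversion: H5AC_protect() no longer takes an H5AC_protect_t access mode but an unsigned flags word. A minimal before/after of a read-only protect, taken from the call sites above:

    /* Old call: access mode enum */
    hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, addr, &cache_udata, H5AC_READ);

    /* New call: unsigned flags (H5AC__READ_ONLY_FLAG for read-only access) */
    hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, addr, &cache_udata, H5AC__READ_ONLY_FLAG);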
diff --git a/src/H5B2int.c b/src/H5B2int.c
index 638b4b3..0f884c2 100644
--- a/src/H5B2int.c
+++ b/src/H5B2int.c
@@ -215,9 +215,9 @@ H5B2__split1(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Protect both leaves */
- if(NULL == (left_int = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
+ if(NULL == (left_int = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
- if(NULL == (right_int = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
+ if(NULL == (right_int = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* More setup for child nodes */
@@ -244,9 +244,9 @@ H5B2__split1(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Protect both leaves */
- if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
+ if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
- if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE)))
+ if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* More setup for child nodes */
@@ -403,7 +403,7 @@ H5B2__split_root(H5B2_hdr_t *hdr, hid_t dxpl_id)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to create new internal node")
/* Protect new root node */
- if(NULL == (new_root = H5B2__protect_internal(hdr, dxpl_id, hdr->root.addr, hdr->root.node_nrec, hdr->depth, H5AC_WRITE)))
+ if(NULL == (new_root = H5B2__protect_internal(hdr, dxpl_id, hdr->root.addr, hdr->root.node_nrec, hdr->depth, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Set first node pointer in root node to old root node pointer info */
@@ -467,9 +467,9 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock left & right B-tree child nodes */
- if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
+ if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
- if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
+ if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* More setup for child nodes */
@@ -492,9 +492,9 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock left & right B-tree child nodes */
- if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
+ if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
- if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE)))
+ if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* More setup for child nodes */
@@ -704,11 +704,11 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock B-tree child nodes */
- if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
+ if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
- if(NULL == (middle_internal = H5B2__protect_internal(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
+ if(NULL == (middle_internal = H5B2__protect_internal(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
- if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
+ if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* More setup for child nodes */
@@ -737,11 +737,11 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock B-tree child nodes */
- if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, H5AC_WRITE)))
+ if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
- if(NULL == (middle_leaf = H5B2__protect_leaf(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
+ if(NULL == (middle_leaf = H5B2__protect_leaf(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
- if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE)))
+ if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* More setup for child nodes */
@@ -1084,9 +1084,9 @@ H5B2__merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock left & right B-tree child nodes */
- if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
+ if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
- if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
+ if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* More setup for accessing child node information */
@@ -1109,9 +1109,9 @@ H5B2__merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock left & right B-tree child nodes */
- if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
+ if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
- if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE)))
+ if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* More setup for accessing child node information */
@@ -1246,11 +1246,11 @@ H5B2__merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock B-tree child nodes */
- if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
+ if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
- if(NULL == (middle_internal = H5B2__protect_internal(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
+ if(NULL == (middle_internal = H5B2__protect_internal(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
- if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
+ if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* More setup for accessing child node information */
@@ -1279,11 +1279,11 @@ H5B2__merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock B-tree child nodes */
- if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, H5AC_WRITE)))
+ if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
- if(NULL == (middle_leaf = H5B2__protect_leaf(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
+ if(NULL == (middle_leaf = H5B2__protect_leaf(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
- if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE)))
+ if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* More setup for accessing child node information */
@@ -1460,7 +1460,7 @@ H5B2__swap_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
child_addr = internal->node_ptrs[idx].addr;
/* Lock B-tree child nodes */
- if(NULL == (child_internal = H5B2__protect_internal(hdr, dxpl_id, child_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
+ if(NULL == (child_internal = H5B2__protect_internal(hdr, dxpl_id, child_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* More setup for accessing child node information */
@@ -1475,7 +1475,7 @@ H5B2__swap_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
child_addr = internal->node_ptrs[idx].addr;
/* Lock B-tree child node */
- if(NULL == (child_leaf = H5B2__protect_leaf(hdr, dxpl_id, child_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
+ if(NULL == (child_leaf = H5B2__protect_leaf(hdr, dxpl_id, child_addr, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* More setup for accessing child node information */
@@ -1538,7 +1538,7 @@ H5B2__insert_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr
HDassert(H5F_addr_defined(curr_node_ptr->addr));
/* Lock current B-tree node */
- if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, H5AC_WRITE)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Must have a leaf node with enough space to insert a record now */
@@ -1637,7 +1637,7 @@ H5B2__insert_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
HDassert(H5F_addr_defined(curr_node_ptr->addr));
/* Lock current B-tree node */
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, depth, H5AC_WRITE)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, depth, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Split or redistribute child node pointers, if necessary */
@@ -1834,7 +1834,7 @@ done:
*/
H5B2_leaf_t *
H5B2__protect_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr, uint16_t nrec,
- H5AC_protect_t rw)
+ unsigned flags)
{
H5B2_leaf_cache_ud_t udata; /* User-data for callback */
H5B2_leaf_t *ret_value; /* Return value */
@@ -1845,13 +1845,16 @@ H5B2__protect_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr, uint16_t nrec,
HDassert(hdr);
HDassert(H5F_addr_defined(addr));
+ /* only H5AC__READ_ONLY_FLAG may appear in flags */
+ HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
+
/* Set up user data for callback */
udata.f = hdr->f;
udata.hdr = hdr;
H5_CHECKED_ASSIGN(udata.nrec, uint16_t, nrec, unsigned)
/* Protect the leaf node */
- if(NULL == (ret_value = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_LEAF, addr, &udata, rw)))
+ if(NULL == (ret_value = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_LEAF, addr, &udata, flags)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, NULL, "unable to protect B-tree leaf node")
done:
@@ -1953,7 +1956,7 @@ done:
*/
H5B2_internal_t *
H5B2__protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr,
- uint16_t nrec, uint16_t depth, H5AC_protect_t rw)
+ uint16_t nrec, uint16_t depth, unsigned flags)
{
H5B2_internal_cache_ud_t udata; /* User data to pass through to cache 'deserialize' callback */
H5B2_internal_t *ret_value; /* Return value */
@@ -1965,6 +1968,9 @@ H5B2__protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr,
HDassert(H5F_addr_defined(addr));
HDassert(depth > 0);
+ /* only H5AC__READ_ONLY_FLAG may appear in flags */
+ HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
+
/* Set up user data for callback */
udata.f = hdr->f;
udata.hdr = hdr;
@@ -1972,7 +1978,7 @@ H5B2__protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr,
udata.depth = depth;
/* Protect the internal node */
- if(NULL == (ret_value = (H5B2_internal_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_INT, addr, &udata, rw)))
+ if(NULL == (ret_value = (H5B2_internal_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_INT, addr, &udata, flags)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, NULL, "unable to protect B-tree internal node")
done:
@@ -2021,7 +2027,7 @@ H5B2__iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5B2_internal_t *internal; /* Pointer to internal node */
/* Lock the current B-tree node */
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC_READ)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Set up information about current node */
@@ -2040,7 +2046,7 @@ H5B2__iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5B2_leaf_t *leaf; /* Pointer to leaf node */
/* Lock the current B-tree node */
- if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, H5AC_READ)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Set up information about current node */
@@ -2125,7 +2131,7 @@ H5B2__remove_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr
/* Lock current B-tree node */
leaf_addr = curr_node_ptr->addr;
- if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, leaf_addr, curr_node_ptr->node_nrec, H5AC_WRITE)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, leaf_addr, curr_node_ptr->node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Sanity check number of records */
@@ -2234,7 +2240,7 @@ H5B2__remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased,
/* Lock current B-tree node */
internal_addr = curr_node_ptr->addr;
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, internal_addr, curr_node_ptr->node_nrec, depth, H5AC_WRITE)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, internal_addr, curr_node_ptr->node_nrec, depth, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Determine the correct number of records to merge at */
@@ -2443,7 +2449,7 @@ H5B2__remove_leaf_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
/* Lock B-tree leaf node */
leaf_addr = curr_node_ptr->addr;
- if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, leaf_addr, curr_node_ptr->node_nrec, H5AC_WRITE)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, leaf_addr, curr_node_ptr->node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Sanity check number of records */
@@ -2551,7 +2557,7 @@ H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
/* Lock current B-tree node */
internal_addr = curr_node_ptr->addr;
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, internal_addr, curr_node_ptr->node_nrec, depth, H5AC_WRITE)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, internal_addr, curr_node_ptr->node_nrec, depth, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
HDassert(internal->nrec == curr_node_ptr->node_nrec);
HDassert(depth == hdr->depth || internal->nrec > 1);
@@ -2827,7 +2833,7 @@ H5B2__neighbor_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_p
HDassert(op);
/* Lock current B-tree node */
- if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, H5AC_READ)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Locate node pointer for child */
@@ -2914,7 +2920,7 @@ H5B2__neighbor_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
HDassert(op);
/* Lock current B-tree node */
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, depth, H5AC_READ)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, depth, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Locate node pointer for child */
@@ -2987,7 +2993,7 @@ H5B2__delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
unsigned u; /* Local index */
/* Lock the current B-tree node */
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC_WRITE)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Set up information about current node */
@@ -3004,7 +3010,7 @@ H5B2__delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5B2_leaf_t *leaf; /* Pointer to leaf node */
/* Lock the current B-tree node */
- if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, H5AC_WRITE)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Set up information about current node */
@@ -3063,7 +3069,7 @@ H5B2__node_size(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
HDassert(depth > 0);
/* Lock the current B-tree node */
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC_READ)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Recursively descend into child nodes, if we are above the "twig" level in the B-tree */
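The same mapping applies to write access throughout H5B2int.c: H5AC_WRITE becomes H5AC__NO_FLAGS_SET, and the node-protect helpers forward the caller's flags directly, asserting that at most H5AC__READ_ONLY_FLAG is set. A hedged sketch of the two call patterns (variable names here are illustrative):

    /* Write access (was H5AC_WRITE): protect with no flags set */
    if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, node_addr, nrec, H5AC__NO_FLAGS_SET)))
        HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")

    /* Read-only access (was H5AC_READ) */
    if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, node_addr, nrec, H5AC__READ_ONLY_FLAG)))
        HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")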
diff --git a/src/H5B2pkg.h b/src/H5B2pkg.h
index a2d32e6..b13c58d 100644
--- a/src/H5B2pkg.h
+++ b/src/H5B2pkg.h
@@ -231,6 +231,7 @@ typedef enum H5B2_nodepos_t {
/* Callback info for loading a v2 B-tree header into the cache */
typedef struct H5B2_hdr_cache_ud_t {
H5F_t *f; /* File that v2 b-tree header is within */
+ haddr_t addr; /* Address of B-tree header in the file */
void *ctx_udata; /* User-data for protecting */
} H5B2_hdr_cache_ud_t;
@@ -305,11 +306,11 @@ H5_DLL herr_t H5B2__hdr_delete(H5B2_hdr_t *hdr, hid_t dxpl_id);
/* Routines for operating on leaf nodes */
H5B2_leaf_t *H5B2__protect_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr,
- uint16_t nrec, H5AC_protect_t rw);
+ uint16_t nrec, unsigned flags);
/* Routines for operating on internal nodes */
H5_DLL H5B2_internal_t *H5B2__protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id,
- haddr_t addr, uint16_t nrec, uint16_t depth, H5AC_protect_t rw);
+ haddr_t addr, uint16_t nrec, uint16_t depth, unsigned flags);
/* Routines for allocating nodes */
H5_DLL herr_t H5B2__split_root(H5B2_hdr_t *hdr, hid_t dxpl_id);
@@ -347,7 +348,7 @@ H5_DLL herr_t H5B2__neighbor_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id,
/* Routines for removing records */
H5_DLL herr_t H5B2__remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id,
hbool_t *depth_decreased, void *swap_loc, uint16_t depth,
- H5AC_info_t *parent_cache_info, hbool_t *parent_cache_info_dirtied_ptr,
+ H5AC_info_t *parent_cache_info, unsigned *parent_cache_info_flags_ptr,
H5B2_nodepos_t curr_pos, H5B2_node_ptr_t *curr_node_ptr, void *udata,
H5B2_remove_t op, void *op_data);
H5_DLL herr_t H5B2__remove_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id,
@@ -355,8 +356,8 @@ H5_DLL herr_t H5B2__remove_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id,
void *udata, H5B2_remove_t op, void *op_data);
H5_DLL herr_t H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
hbool_t *depth_decreased, void *swap_loc, uint16_t depth,
- H5AC_info_t *parent_cache_info, hbool_t *parent_cache_info_dirtied_ptr,
- H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, hsize_t idx,
+ H5AC_info_t *parent_cache_info, unsigned *parent_cache_info_flags_ptr,
+ H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, hsize_t n,
H5B2_remove_t op, void *op_data);
H5_DLL herr_t H5B2__remove_leaf_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos,
diff --git a/src/H5B2test.c b/src/H5B2test.c
index 1d31af8..35771f2 100644
--- a/src/H5B2test.c
+++ b/src/H5B2test.c
@@ -430,7 +430,7 @@ H5B2_get_node_info_test(H5B2_t *bt2, hid_t dxpl_id, void *udata,
H5B2_node_ptr_t next_node_ptr; /* Node pointer info for next node */
/* Lock B-tree current node */
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC_READ)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node")
/* Locate node pointer for child */
@@ -470,7 +470,7 @@ H5B2_get_node_info_test(H5B2_t *bt2, hid_t dxpl_id, void *udata,
H5B2_leaf_t *leaf; /* Pointer to leaf node in B-tree */
/* Lock B-tree leaf node */
- if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC_READ)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Locate record */
diff --git a/src/H5Bcache.c b/src/H5Bcache.c
index a161e88..dfa4dab 100644
--- a/src/H5Bcache.c
+++ b/src/H5Bcache.c
@@ -37,7 +37,6 @@
#include "H5private.h" /* Generic Functions */
#include "H5Bpkg.h" /* B-link trees */
#include "H5Eprivate.h" /* Error handling */
-#include "H5MFprivate.h" /* File memory management */
/****************/
@@ -55,11 +54,14 @@
/********************/
/* Metadata cache callbacks */
-static H5B_t *H5B__load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5B__flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5B_t *b, unsigned H5_ATTR_UNUSED * flags_ptr);
-static herr_t H5B__dest(H5F_t *f, H5B_t *bt);
-static herr_t H5B__clear(H5F_t *f, H5B_t *b, hbool_t destroy);
-static herr_t H5B__compute_size(const H5F_t *f, const H5B_t *bt, size_t *size_ptr);
+static herr_t H5B__get_load_size(const void *udata, size_t *image_len);
+static void *H5B__deserialize(const void *image, size_t len, void *udata,
+ hbool_t *dirty);
+static herr_t H5B__image_len(const void *thing, size_t *image_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static herr_t H5B__serialize(const H5F_t *f, void *image, size_t len,
+ void *thing);
+static herr_t H5B__free_icr(void *thing);
/*********************/
@@ -68,13 +70,19 @@ static herr_t H5B__compute_size(const H5F_t *f, const H5B_t *bt, size_t *size_pt
/* H5B inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_BT[1] = {{
- H5AC_BT_ID,
- (H5AC_load_func_t)H5B__load,
- (H5AC_flush_func_t)H5B__flush,
- (H5AC_dest_func_t)H5B__dest,
- (H5AC_clear_func_t)H5B__clear,
- (H5AC_notify_func_t)NULL,
- (H5AC_size_func_t)H5B__compute_size,
+ H5AC_BT_ID, /* Metadata client ID */
+ "v1 B-tree", /* Metadata client name (for debugging) */
+ H5FD_MEM_BTREE, /* File space memory type for client */
+ H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
+ H5B__get_load_size, /* 'get_load_size' callback */
+ H5B__deserialize, /* 'deserialize' callback */
+ H5B__image_len, /* 'image_len' callback */
+ NULL, /* 'pre_serialize' callback */
+ H5B__serialize, /* 'serialize' callback */
+ NULL, /* 'notify' callback */
+ H5B__free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
/*******************/
@@ -84,35 +92,71 @@ const H5AC_class_t H5AC_BT[1] = {{
/*-------------------------------------------------------------------------
- * Function: H5B__load
+ * Function: H5B__get_load_size
*
- * Purpose: Loads a B-tree node from the disk.
+ * Purpose: Compute the size of the data structure on disk.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * May 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5B__get_load_size(const void *_udata, size_t *image_len)
+{
+ const H5B_cache_ud_t *udata = (const H5B_cache_ud_t *)_udata; /* User data for callback */
+ H5B_shared_t *shared; /* Pointer to shared B-tree info */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check arguments */
+ HDassert(udata);
+ HDassert(image_len);
+
+ /* Get shared info for B-tree */
+ shared = (H5B_shared_t *)H5UC_GET_OBJ(udata->rc_shared);
+ HDassert(shared);
+
+ /* Set the image length size */
+ *image_len = shared->sizeof_rnode;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5B__get_load_size() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5B__deserialize
+ *
+ * Purpose: Deserialize the data structure from disk.
*
* Return: Success: Pointer to a new B-tree node.
* Failure: NULL
*
- * Programmer: Robb Matzke
- * matzke@llnl.gov
- * Jun 23 1997
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * Mar 24, 2008
*
*-------------------------------------------------------------------------
*/
-static H5B_t *
-H5B__load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
+static void *
+H5B__deserialize(const void *_image, size_t H5_ATTR_UNUSED len, void *_udata,
+ hbool_t H5_ATTR_UNUSED *dirty)
{
H5B_t *bt = NULL; /* Pointer to the deserialized B-tree node */
H5B_cache_ud_t *udata = (H5B_cache_ud_t *)_udata; /* User data for callback */
H5B_shared_t *shared; /* Pointer to shared B-tree info */
- const uint8_t *p; /* Pointer into raw data buffer */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into image buffer */
uint8_t *native; /* Pointer to native keys */
unsigned u; /* Local index variable */
H5B_t *ret_value; /* Return value */
FUNC_ENTER_STATIC
- /* Check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ /* check arguments */
+ HDassert(image);
HDassert(udata);
/* Allocate the B-tree node in memory */
@@ -134,53 +178,50 @@ H5B__load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
if(NULL == (bt->child = H5FL_SEQ_MALLOC(haddr_t, (size_t)shared->two_k)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, NULL, "can't allocate buffer for child addresses")
- if(H5F_block_read(f, H5FD_MEM_BTREE, addr, shared->sizeof_rnode, dxpl_id, shared->page) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_READERROR, NULL, "can't read B-tree node")
-
- /* Set the pointer into the raw data buffer */
- p = shared->page;
-
/* magic number */
- if(HDmemcmp(p, H5B_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ if(HDmemcmp(image, H5B_MAGIC, (size_t)H5_SIZEOF_MAGIC))
HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, NULL, "wrong B-tree signature")
- p += 4;
+ image += H5_SIZEOF_MAGIC;
/* node type and level */
- if(*p++ != (uint8_t)udata->type->id)
+ if(*image++ != (uint8_t)udata->type->id)
HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, NULL, "incorrect B-tree node type")
- bt->level = *p++;
+ bt->level = *image++;
/* entries used */
- UINT16DECODE(p, bt->nchildren);
+ UINT16DECODE(image, bt->nchildren);
/* Check if bt->nchildren is greater than two_k */
if(bt->nchildren > shared->two_k)
HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, NULL, "number of children is greater than maximum")
/* sibling pointers */
- H5F_addr_decode(udata->f, (const uint8_t **)&p, &(bt->left));
- H5F_addr_decode(udata->f, (const uint8_t **)&p, &(bt->right));
+ H5F_addr_decode(udata->f, (const uint8_t **)&image, &(bt->left));
+ H5F_addr_decode(udata->f, (const uint8_t **)&image, &(bt->right));
/* the child/key pairs */
native = bt->native;
for(u = 0; u < bt->nchildren; u++) {
/* Decode native key value */
- if((udata->type->decode)(shared, p, native) < 0)
+ if((udata->type->decode)(shared, image, native) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDECODE, NULL, "unable to decode key")
- p += shared->sizeof_rkey;
+ image += shared->sizeof_rkey;
native += udata->type->sizeof_nkey;
/* Decode address value */
- H5F_addr_decode(udata->f, (const uint8_t **)&p, bt->child + u);
+ H5F_addr_decode(udata->f, (const uint8_t **)&image, bt->child + u);
} /* end for */
/* Decode final key */
if(bt->nchildren > 0) {
/* Decode native key value */
- if((udata->type->decode)(shared, p, native) < 0)
+ if((udata->type->decode)(shared, image, native) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDECODE, NULL, "unable to decode key")
} /* end if */
+ /* Sanity check */
+ HDassert((size_t)((const uint8_t *)image - (const uint8_t *)_image) <= len);
+
/* Set return value */
ret_value = bt;
@@ -190,223 +231,151 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, NULL, "unable to destroy B-tree node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B__load() */ /*lint !e818 Can't make udata a pointer to const */
+} /* end H5B__deserialize() */
/*-------------------------------------------------------------------------
- * Function: H5B__flush
+ * Function: H5B__image_len
*
- * Purpose: Flushes a dirty B-tree node to disk.
+ * Purpose: Compute the size of the data structure on disk.
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Robb Matzke
- * matzke@llnl.gov
- * Jun 23 1997
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * May 20, 2010
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5B__flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5B_t *bt, unsigned H5_ATTR_UNUSED * flags_ptr)
+H5B__image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
+ const H5B_t *bt = (const H5B_t *)_thing; /* Pointer to the B-tree node */
H5B_shared_t *shared; /* Pointer to shared B-tree info */
- herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_STATIC
+ FUNC_ENTER_STATIC_NOERR
- /* check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ /* Check arguments */
HDassert(bt);
+ HDassert(image_len);
+
+ /* Get shared info for B-tree */
shared = (H5B_shared_t *)H5UC_GET_OBJ(bt->rc_shared);
HDassert(shared);
- HDassert(shared->type);
- HDassert(shared->type->encode);
- if(bt->cache_info.is_dirty) {
- uint8_t *p; /* Pointer into raw data buffer */
- uint8_t *native; /* Pointer to native keys */
- unsigned u; /* Local index variable */
-
- p = shared->page;
-
- /* magic number */
- HDmemcpy(p, H5B_MAGIC, (size_t)H5_SIZEOF_MAGIC);
- p += 4;
-
- /* node type and level */
- *p++ = (uint8_t)shared->type->id;
- H5_CHECK_OVERFLOW(bt->level, unsigned, uint8_t);
- *p++ = (uint8_t)bt->level;
-
- /* entries used */
- UINT16ENCODE(p, bt->nchildren);
-
- /* sibling pointers */
- H5F_addr_encode(f, &p, bt->left);
- H5F_addr_encode(f, &p, bt->right);
-
- /* child keys and pointers */
- native = bt->native;
- for(u = 0; u < bt->nchildren; ++u) {
- /* encode the key */
- if(shared->type->encode(shared, p, native) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTENCODE, FAIL, "unable to encode B-tree key")
- p += shared->sizeof_rkey;
- native += shared->type->sizeof_nkey;
-
- /* encode the child address */
- H5F_addr_encode(f, &p, bt->child[u]);
- } /* end for */
- if(bt->nchildren > 0) {
- /* Encode the final key */
- if(shared->type->encode(shared, p, native) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTENCODE, FAIL, "unable to encode B-tree key")
- } /* end if */
-
- /*
- * Write the disk page. We always write the header, but we don't
- * bother writing data for the child entries that don't exist or
- * for the final unchanged children.
- */
- if(H5F_block_write(f, H5FD_MEM_BTREE, addr, shared->sizeof_rnode, dxpl_id, shared->page) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTFLUSH, FAIL, "unable to save B-tree node to disk")
-
- bt->cache_info.is_dirty = FALSE;
- } /* end if */
-
- if(destroy)
- if(H5B__dest(f, bt) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to destroy B-tree node")
+ /* Set the image length size */
+ *image_len = shared->sizeof_rnode;
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B__flush() */
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5B__image_len() */
/*-------------------------------------------------------------------------
- * Function: H5B__dest
+ * Function: H5B__serialize
*
- * Purpose: Destroys a B-tree node in memory.
+ * Purpose: Serialize the data structure for writing to disk.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Jan 15 2003
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * Mar 24, 2008
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5B__dest(H5F_t *f, H5B_t *bt)
+H5B__serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNUSED len,
+ void *_thing)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ H5B_t *bt = (H5B_t *)_thing; /* Pointer to the B-tree node */
+ H5B_shared_t *shared; /* Pointer to shared B-tree info */
+ uint8_t *image = (uint8_t *)_image; /* Pointer into image buffer */
+ uint8_t *native; /* Pointer to native keys */
+ unsigned u; /* Local index counter */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
- /*
- * Check arguments.
- */
- HDassert(f);
+ /* check arguments */
+ HDassert(image);
HDassert(bt);
HDassert(bt->rc_shared);
+ shared = (H5B_shared_t *)H5UC_GET_OBJ(bt->rc_shared);
+ HDassert(shared);
+ HDassert(shared->type);
+ HDassert(shared->type->encode);
- /* If we're going to free the space on disk, the address must be valid */
- HDassert(!bt->cache_info.free_file_space_on_destroy || H5F_addr_defined(bt->cache_info.addr));
+ /* magic number */
+ HDmemcpy(image, H5B_MAGIC, (size_t)H5_SIZEOF_MAGIC);
+ image += H5_SIZEOF_MAGIC;
- /* Check for freeing file space for B-tree node */
- if(bt->cache_info.free_file_space_on_destroy) {
- H5B_shared_t *shared; /* Pointer to shared B-tree info */
+ /* node type and level */
+ *image++ = (uint8_t)shared->type->id;
+ H5_CHECK_OVERFLOW(bt->level, unsigned, uint8_t);
+ *image++ = (uint8_t)bt->level;
- /* Get the pointer to the shared B-tree info */
- shared = (H5B_shared_t *)H5UC_GET_OBJ(bt->rc_shared);
- HDassert(shared);
+ /* entries used */
+ UINT16ENCODE(image, bt->nchildren);
+
+ /* sibling pointers */
+ H5F_addr_encode(f, &image, bt->left);
+ H5F_addr_encode(f, &image, bt->right);
- /* Release the space on disk */
- /* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_BTREE, H5AC_dxpl_id, bt->cache_info.addr, (hsize_t)shared->sizeof_rnode) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to free B-tree node")
+ /* child keys and pointers */
+ native = bt->native;
+ for(u = 0; u < bt->nchildren; ++u) {
+ /* encode the key */
+ if(shared->type->encode(shared, image, native) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTENCODE, FAIL, "unable to encode B-tree key")
+ image += shared->sizeof_rkey;
+ native += shared->type->sizeof_nkey;
+
+ /* encode the child address */
+ H5F_addr_encode(f, &image, bt->child[u]);
+ } /* end for */
+ if(bt->nchildren > 0) {
+ /* Encode the final key */
+ if(shared->type->encode(shared, image, native) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTENCODE, FAIL, "unable to encode B-tree key")
} /* end if */
- /* Destroy B-tree node */
- if(H5B__node_dest(bt) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to destroy B-tree node")
+ /* Sanity check */
+ HDassert((size_t)((const uint8_t *)image - (const uint8_t *)_image) <= len);
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B__dest() */
+} /* end H5B__serialize() */
/*-------------------------------------------------------------------------
- * Function: H5B__clear
+ * Function: H5B__free_icr
*
- * Purpose: Mark a B-tree node in memory as non-dirty.
+ * Purpose: Destroy/release an "in core representation" of a data structure
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Mar 20 2003
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * Mar 26, 2008
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5B__clear(H5F_t *f, H5B_t *bt, hbool_t destroy)
+H5B__free_icr(void *thing)
{
- herr_t ret_value = SUCCEED;
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
- /*
- * Check arguments.
- */
- HDassert(bt);
-
- /* Reset the dirty flag. */
- bt->cache_info.is_dirty = FALSE;
+ /* Check arguments */
+ HDassert(thing);
- if(destroy)
- if(H5B__dest(f, bt) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to destroy B-tree node")
+ /* Destroy B-tree node */
+ if(H5B__node_dest((H5B_t *)thing) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to destroy B-tree node")
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B__clear() */
-
-
-/*-------------------------------------------------------------------------
- * Function: H5B__compute_size
- *
- * Purpose: Compute the size in bytes of the specified instance of
- * H5B_t on disk, and return it in *len_ptr. On failure,
- * the value of *len_ptr is undefined.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 5/13/04
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5B__compute_size(const H5F_t H5_ATTR_UNUSED *f, const H5B_t *bt, size_t *size_ptr)
-{
- H5B_shared_t *shared; /* Pointer to shared B-tree info */
-
- FUNC_ENTER_STATIC_NOERR
-
- /* check arguments */
- HDassert(f);
- HDassert(bt);
- HDassert(bt->rc_shared);
- shared = (H5B_shared_t *)H5UC_GET_OBJ(bt->rc_shared);
- HDassert(shared);
- HDassert(shared->type);
- HDassert(size_ptr);
-
- /* Set size value */
- *size_ptr = shared->sizeof_rnode;
-
- FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5B__compute_size() */
+} /* end H5B__free_icr() */
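For illustration, the stand-alone sketch below (hypothetical toy types, not HDF5 internals) shows the same bounds-checked encode-into-image pattern that the new H5B__serialize() callback above uses: walk the in-memory node, append each key and child address to a cache-supplied image buffer, and assert that the encoded bytes fit within the declared image length.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical in-memory node: up to 16 fixed-width keys and child addresses. */
typedef struct toy_node {
    unsigned nchildren;
    uint32_t keys[16];
    uint64_t child[16];
} toy_node;

/* Encode a node into a caller-supplied image buffer and verify that the
 * encoded length does not exceed the buffer size -- the same shape as a
 * serialize callback in the new cache client API. */
static int toy_serialize(const toy_node *node, uint8_t *image, size_t len)
{
    uint8_t *p = image;
    unsigned u;

    for(u = 0; u < node->nchildren; u++) {
        memcpy(p, &node->keys[u], sizeof(node->keys[u]));   /* key */
        p += sizeof(node->keys[u]);
        memcpy(p, &node->child[u], sizeof(node->child[u])); /* child address */
        p += sizeof(node->child[u]);
    }

    /* Sanity check -- encoded image must fit in the supplied buffer. */
    assert((size_t)(p - image) <= len);

    return 0;
}

int main(void)
{
    toy_node n = { 2, {10, 20}, {0x100, 0x200} };
    uint8_t  buf[256];

    toy_serialize(&n, buf, sizeof(buf));
    printf("encoded %u children\n", n.nchildren);
    return 0;
}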
diff --git a/src/H5Bdbg.c b/src/H5Bdbg.c
index 526a647..e6d54dc 100644
--- a/src/H5Bdbg.c
+++ b/src/H5Bdbg.c
@@ -89,7 +89,7 @@ H5B_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int f
cache_udata.f = f;
cache_udata.type = type;
cache_udata.rc_shared = rc_shared;
- if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ)))
+ if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree node")
/*
@@ -206,7 +206,7 @@ H5B__assert(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type, void
cache_udata.f = f;
cache_udata.type = type;
cache_udata.rc_shared = rc_shared;
- bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ);
+ bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC__READ_ONLY_FLAG);
HDassert(bt);
shared = (H5B_shared_t *)H5UC_GET_OBJ(bt->rc_shared);
HDassert(shared);
@@ -227,7 +227,7 @@ H5B__assert(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type, void
* test.
*/
for(ncell = 0; cur; ncell++) {
- bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, cur->addr, &cache_udata, H5AC_READ);
+ bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, cur->addr, &cache_udata, H5AC__READ_ONLY_FLAG);
HDassert(bt);
/* Check node header */
diff --git a/src/H5C.c b/src/H5C.c
index a5eaa93..fc4e8a5 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -85,12 +85,23 @@
#include "H5FDprivate.h" /* File drivers */
#include "H5FLprivate.h" /* Free Lists */
#include "H5Iprivate.h" /* IDs */
+#include "H5MFprivate.h" /* File memory management */
#include "H5MMprivate.h" /* Memory management */
#include "H5Pprivate.h" /* Property lists */
#include "H5SLprivate.h" /* Skip lists */
/*
+ * Private macros.
+ */
+#if H5C_DO_MEMORY_SANITY_CHECKS
+#define H5C_IMAGE_EXTRA_SPACE 8
+#define H5C_IMAGE_SANITY_VALUE "DeadBeef"
+#else /* H5C_DO_MEMORY_SANITY_CHECKS */
+#define H5C_IMAGE_EXTRA_SPACE 0
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+
+/*
* Private file-scope variables.
*/
@@ -103,27 +114,21 @@ H5FL_DEFINE_STATIC(H5C_t);
*/
static herr_t H5C__auto_adjust_cache_size(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- hbool_t write_permitted,
- hbool_t * first_flush_ptr);
+ hid_t dxpl_id,
+ hbool_t write_permitted);
static herr_t H5C__autoadjust__ageout(H5F_t * f,
+ hid_t dxpl_id,
double hit_rate,
enum H5C_resize_status * status_ptr,
size_t * new_max_cache_size_ptr,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- hbool_t write_permitted,
- hbool_t * first_flush_ptr);
+ hbool_t write_permitted);
static herr_t H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t * cache_ptr);
static herr_t H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- hbool_t write_permitted,
- hbool_t * first_flush_ptr);
+ hid_t dxpl_id,
+ hbool_t write_permitted);
static herr_t H5C__autoadjust__ageout__insert_new_marker(H5C_t * cache_ptr);
@@ -135,19 +140,16 @@ static herr_t H5C__flash_increase_cache_size(H5C_t * cache_ptr,
size_t old_entry_size,
size_t new_entry_size);
-static herr_t H5C_flush_single_entry(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- const H5C_class_t * type_ptr,
- haddr_t addr,
- unsigned flags,
- hbool_t * first_flush_ptr,
- hbool_t del_entry_from_slist_on_destroy);
+static herr_t H5C_flush_single_entry(const H5F_t * f,
+ hid_t dxpl_id,
+ haddr_t addr,
+ unsigned flags,
+ hbool_t del_entry_from_slist_on_destroy,
+ int64_t *entry_size_change_ptr);
-static herr_t H5C_flush_invalidate_cache(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- unsigned flags);
+static herr_t H5C_flush_invalidate_cache(const H5F_t * f,
+ hid_t dxpl_id,
+ unsigned flags);
static void * H5C_load_entry(H5F_t * f,
hid_t dxpl_id,
@@ -156,19 +158,16 @@ static void * H5C_load_entry(H5F_t * f,
void * udata);
static herr_t H5C_make_space_in_cache(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- size_t space_needed,
- hbool_t write_permitted,
- hbool_t * first_flush_ptr);
+ hid_t dxpl_id,
+ size_t space_needed,
+ hbool_t write_permitted);
static herr_t H5C_tag_entry(H5C_t * cache_ptr,
H5C_cache_entry_t * entry_ptr,
hid_t dxpl_id);
static herr_t H5C_flush_tagged_entries(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
+ hid_t dxpl_id,
H5C_t * cache_ptr,
haddr_t tag);
@@ -176,20 +175,29 @@ static herr_t H5C_mark_tagged_entries(H5C_t * cache_ptr,
haddr_t tag);
static herr_t H5C_flush_marked_entries(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
+ hid_t dxpl_id,
H5C_t * cache_ptr);
#if H5C_DO_TAGGING_SANITY_CHECKS
static herr_t H5C_verify_tag(int id, haddr_t tag);
#endif
+#if H5C_DO_SLIST_SANITY_CHECKS
+static hbool_t H5C_entry_in_skip_list(H5C_t * cache_ptr,
+ H5C_cache_entry_t *target_ptr);
+#endif /* H5C_DO_SLIST_SANITY_CHECKS */
+
#if H5C_DO_EXTREME_SANITY_CHECKS
static herr_t H5C_validate_lru_list(H5C_t * cache_ptr);
static herr_t H5C_validate_pinned_entry_list(H5C_t * cache_ptr);
static herr_t H5C_validate_protected_entry_list(H5C_t * cache_ptr);
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+#if 0 /* debugging routines */
+herr_t H5C_dump_cache(H5C_t * cache_ptr, const char * cache_name);
+herr_t H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn);
+#endif /* debugging routines */
+
/****************************************************************************
*
@@ -208,25 +216,53 @@ static herr_t H5C_validate_protected_entry_list(H5C_t * cache_ptr);
#define H5C__EPOCH_MARKER_TYPE H5C__MAX_NUM_TYPE_IDS
-static void *H5C_epoch_marker_load(H5F_t *f, hid_t dxpl_id, haddr_t addr,
- void *udata);
-static herr_t H5C_epoch_marker_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest,
- haddr_t addr, void *thing,
- unsigned *flags_ptr);
-static herr_t H5C_epoch_marker_dest(H5F_t *f, void *thing);
-static herr_t H5C_epoch_marker_clear(H5F_t *f, void *thing, hbool_t dest);
-static herr_t H5C_epoch_marker_notify(H5C_notify_action_t action, void *thing);
-static herr_t H5C_epoch_marker_size(const H5F_t *f, const void *thing, size_t *size_ptr);
+static herr_t H5C__epoch_marker_get_load_size(const void *udata_ptr,
+ size_t *image_len_ptr);
+static void * H5C__epoch_marker_deserialize(const void * image_ptr,
+ size_t len,
+ void * udata,
+ hbool_t * dirty_ptr);
+static herr_t H5C__epoch_marker_image_len(const void * thing,
+ size_t *image_len_ptr,
+ hbool_t *compressed_ptr,
+ size_t *compressed_len_ptr);
+static herr_t H5C__epoch_marker_pre_serialize(const H5F_t *f,
+ hid_t dxpl_id,
+ void * thing,
+ haddr_t addr,
+ size_t len,
+ size_t compressed_len,
+ haddr_t * new_addr_ptr,
+ size_t * new_len_ptr,
+ size_t * new_compressed_len_ptr,
+ unsigned * flags_ptr);
+static herr_t H5C__epoch_marker_serialize(const H5F_t *f,
+ void * image_ptr,
+ size_t len,
+ void * thing);
+static herr_t H5C__epoch_marker_notify(H5C_notify_action_t action, void *thing);
+static herr_t H5C__epoch_marker_free_icr(void * thing);
+
+static herr_t H5C__epoch_marker_clear(const H5F_t *f, void * thing,
+ hbool_t about_to_destroy);
+static herr_t H5C__epoch_marker_fsf_size(const void H5_ATTR_UNUSED * thing,
+ size_t H5_ATTR_UNUSED * fsf_size_ptr);
const H5C_class_t epoch_marker_class =
{
- /* id = */ H5C__EPOCH_MARKER_TYPE,
- /* load = */ &H5C_epoch_marker_load,
- /* flush = */ &H5C_epoch_marker_flush,
- /* dest = */ &H5C_epoch_marker_dest,
- /* clear = */ &H5C_epoch_marker_clear,
- /* notify = */&H5C_epoch_marker_notify,
- /* size = */ &H5C_epoch_marker_size
+ /* id = */ H5C__EPOCH_MARKER_TYPE,
+ /* name = */ "epoch marker",
+ /* mem_type = */ H5FD_MEM_DEFAULT, /* value doesn't matter */
+ /* flags = */ H5AC__CLASS_NO_FLAGS_SET,
+ /* get_load_size = */ H5C__epoch_marker_get_load_size,
+ /* deserialize = */ H5C__epoch_marker_deserialize,
+ /* image_len = */ H5C__epoch_marker_image_len,
+ /* pre_serialize = */ H5C__epoch_marker_pre_serialize,
+ /* serialize = */ H5C__epoch_marker_serialize,
+ /* notify = */ H5C__epoch_marker_notify,
+ /* free_icr = */ H5C__epoch_marker_free_icr,
+ /* clear = */ H5C__epoch_marker_clear,
+ /* fsf_size = */ H5C__epoch_marker_fsf_size,
};
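The epoch-marker table above is an instance of the new, wider cache-client callback vector (get_load_size, deserialize, image_len, pre_serialize, serialize, notify, free_icr, clear, fsf_size) that replaces the old load/flush/dest/clear/size set. As a rough sketch only -- hypothetical toy types, not the real H5C_class_t -- a client class of this style can be thought of as a named table of function pointers:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical, much-reduced analogue of a cache client class: a named
 * vtable of serialization callbacks selected per entry type. */
typedef struct toy_class {
    int          id;
    const char  *name;
    int   (*get_load_size)(const void *udata, size_t *image_len);
    void *(*deserialize)(const void *image, size_t len, void *udata);
    int   (*serialize)(void *image, size_t len, void *thing);
    int   (*free_icr)(void *thing);
} toy_class;

/* Stub callbacks for a client type that must never be read or written,
 * mirroring the "called unreachable fcn." epoch-marker stubs above. */
static int   marker_get_load_size(const void *u, size_t *l) { (void)u; (void)l; return -1; }
static void *marker_deserialize(const void *i, size_t l, void *u) { (void)i; (void)l; (void)u; return NULL; }
static int   marker_serialize(void *i, size_t l, void *t) { (void)i; (void)l; (void)t; return -1; }
static int   marker_free_icr(void *t) { (void)t; return -1; }

static const toy_class marker_class = {
    /* id            = */ 0,
    /* name          = */ "epoch marker",
    /* get_load_size = */ marker_get_load_size,
    /* deserialize   = */ marker_deserialize,
    /* serialize     = */ marker_serialize,
    /* free_icr      = */ marker_free_icr,
};

int main(void)
{
    printf("class %d is \"%s\"\n", marker_class.id, marker_class.name);
    return 0;
}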
@@ -239,107 +275,114 @@ const H5C_class_t epoch_marker_class =
* JRM - 11/16/04
*
***************************************************************************/
-
-static void *
-H5C_epoch_marker_load(H5F_t H5_ATTR_UNUSED * f,
- hid_t H5_ATTR_UNUSED dxpl_id,
- haddr_t H5_ATTR_UNUSED addr,
- void H5_ATTR_UNUSED * udata)
+static herr_t
+H5C__epoch_marker_get_load_size(const void H5_ATTR_UNUSED *udata_ptr,
+ size_t H5_ATTR_UNUSED *image_len_ptr)
{
- void * ret_value = NULL; /* Return value */
+ FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
- FUNC_ENTER_NOAPI_NOINIT
+ HERROR(H5E_CACHE, H5E_SYSTEM, "called unreachable fcn.");
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "called unreachable fcn.")
+ FUNC_LEAVE_NOAPI(FAIL)
+} /* end H5C__epoch_marker_get_load_size() */
-done:
+
+static void *
+H5C__epoch_marker_deserialize(const void H5_ATTR_UNUSED * image_ptr, size_t H5_ATTR_UNUSED len,
+ void H5_ATTR_UNUSED * udata, hbool_t H5_ATTR_UNUSED * dirty_ptr)
+{
+ FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
- FUNC_LEAVE_NOAPI(ret_value)
-}
+ HERROR(H5E_CACHE, H5E_SYSTEM, "called unreachable fcn.");
+ FUNC_LEAVE_NOAPI(NULL)
+} /* end H5C__epoch_marker_deserialize() */
static herr_t
-H5C_epoch_marker_flush(H5F_t H5_ATTR_UNUSED *f,
- hid_t H5_ATTR_UNUSED dxpl_id,
- hbool_t H5_ATTR_UNUSED dest,
- haddr_t H5_ATTR_UNUSED addr,
- void H5_ATTR_UNUSED *thing,
- unsigned H5_ATTR_UNUSED * flags_ptr)
+H5C__epoch_marker_image_len(const void H5_ATTR_UNUSED *thing,
+ size_t H5_ATTR_UNUSED *image_len_ptr, hbool_t H5_ATTR_UNUSED *compressed_ptr,
+ size_t H5_ATTR_UNUSED *compressed_len_ptr)
{
- herr_t ret_value = FAIL; /* Return value */
+ FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
- FUNC_ENTER_NOAPI_NOINIT
+ HERROR(H5E_CACHE, H5E_SYSTEM, "called unreachable fcn.");
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "called unreachable fcn.")
+ FUNC_LEAVE_NOAPI(FAIL)
+} /* end H5C__epoch_marker_image_len() */
-done:
+
+static herr_t
+H5C__epoch_marker_pre_serialize(const H5F_t H5_ATTR_UNUSED *f, hid_t H5_ATTR_UNUSED dxpl_id,
+ void H5_ATTR_UNUSED *thing, haddr_t H5_ATTR_UNUSED addr, size_t H5_ATTR_UNUSED len,
+ size_t H5_ATTR_UNUSED compressed_len, haddr_t H5_ATTR_UNUSED *new_addr_ptr,
+ size_t H5_ATTR_UNUSED *new_len_ptr, size_t H5_ATTR_UNUSED *new_compressed_len_ptr,
+ unsigned H5_ATTR_UNUSED *flags_ptr)
+{
+ FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
- FUNC_LEAVE_NOAPI(ret_value)
-}
+ HERROR(H5E_CACHE, H5E_SYSTEM, "called unreachable fcn.");
+ FUNC_LEAVE_NOAPI(FAIL)
+} /* end H5C__epoch_marker_pre_serialize() */
static herr_t
-H5C_epoch_marker_dest(H5F_t H5_ATTR_UNUSED * f,
- void H5_ATTR_UNUSED * thing)
+H5C__epoch_marker_serialize(const H5F_t H5_ATTR_UNUSED *f, void H5_ATTR_UNUSED *image_ptr,
+ size_t H5_ATTR_UNUSED len, void H5_ATTR_UNUSED *thing)
{
- herr_t ret_value = FAIL; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "called unreachable fcn.")
+ HERROR(H5E_CACHE, H5E_SYSTEM, "called unreachable fcn.");
-done:
-
- FUNC_LEAVE_NOAPI(ret_value)
-}
+ FUNC_LEAVE_NOAPI(FAIL)
+} /* end H5C__epoch_marker_serialize() */
+
static herr_t
-H5C_epoch_marker_clear(H5F_t H5_ATTR_UNUSED * f,
- void H5_ATTR_UNUSED * thing,
- hbool_t H5_ATTR_UNUSED dest)
+H5C__epoch_marker_notify(H5C_notify_action_t H5_ATTR_UNUSED action,
+ void H5_ATTR_UNUSED * thing)
{
- herr_t ret_value = FAIL; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "called unreachable fcn.")
+ HERROR(H5E_CACHE, H5E_SYSTEM, "called unreachable fcn.");
-done:
-
- FUNC_LEAVE_NOAPI(ret_value)
-}
+ FUNC_LEAVE_NOAPI(FAIL)
+} /* end H5C__epoch_marker_notify() */
+
static herr_t
-H5C_epoch_marker_notify(H5C_notify_action_t H5_ATTR_UNUSED action,
- void H5_ATTR_UNUSED * thing)
+H5C__epoch_marker_free_icr(void H5_ATTR_UNUSED * thing)
{
- herr_t ret_value = FAIL; /* Return value */
+ FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
- FUNC_ENTER_NOAPI_NOINIT
+ HERROR(H5E_CACHE, H5E_SYSTEM, "called unreachable fcn.");
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "called unreachable fcn.")
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-}
+ FUNC_LEAVE_NOAPI(FAIL)
+} /* end H5C__epoch_marker_free_icr() */
-static herr_t
-H5C_epoch_marker_size(const H5F_t H5_ATTR_UNUSED * f,
- const void H5_ATTR_UNUSED * thing,
- size_t H5_ATTR_UNUSED * size_ptr)
+
+static herr_t
+H5C__epoch_marker_clear(const H5F_t H5_ATTR_UNUSED *f, void H5_ATTR_UNUSED * thing, hbool_t H5_ATTR_UNUSED about_to_destroy)
{
- herr_t ret_value = FAIL; /* Return value */
+ FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
- FUNC_ENTER_NOAPI_NOINIT
+ HERROR(H5E_CACHE, H5E_SYSTEM, "called unreachable fcn.");
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "called unreachable fcn.")
+ FUNC_LEAVE_NOAPI(FAIL)
+} /* end H5C__epoch_marker_clear() */
-done:
+
+static herr_t
+H5C__epoch_marker_fsf_size(const void H5_ATTR_UNUSED * thing, size_t H5_ATTR_UNUSED *fsf_size_ptr)
+{
+ FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
+
+ HERROR(H5E_CACHE, H5E_SYSTEM, "called unreachable fcn.");
+
+ FUNC_LEAVE_NOAPI(FAIL)
+} /* end H5C__epoch_marker_fsf_size() */
- FUNC_LEAVE_NOAPI(ret_value)
-}
/*-------------------------------------------------------------------------
@@ -423,21 +466,46 @@ done:
* Programmer: John Mainzer
* 3/17/10
*
+ * Changes:     Ported code to detect next entry status changes as
+ *        the result of a flush from the serial code in the scan of
+ *        the LRU.  Also added code to detect and adapt to the
+ *        removal from the cache of the next entry in the scan of
+ *        the LRU.
+ *
+ *        Note that at present none of these changes should be
+ *        required, as the operations on entries being flushed that
+ *        can cause these conditions are not permitted in the
+ *        parallel case.  However, Quincey indicates that this may
+ *        change, and thus has requested the modification.
+ *
+ * Note the assert(FALSE) in the if statement whose body
+ * restarts the scan of the LRU. As the body of the if
+ * statement should be unreachable, it should never be
+ * triggered until the constraints on the parallel case
+ * are relaxed. Please remove the assertion at that time.
+ *
+ * Also added warning on the Pinned Entry List scan, as it
+ * is potentially subject to the same issue. As there is
+ * no cognate of this scan in the serial code, I don't have
+ * a fix to port to it.
+ *
+ * JRM -- 4/10/19
+ *
*-------------------------------------------------------------------------
*/
#ifdef H5_HAVE_PARALLEL
#define H5C_APPLY_CANDIDATE_LIST__DEBUG 0
herr_t
H5C_apply_candidate_list(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
+ hid_t dxpl_id,
H5C_t * cache_ptr,
int num_candidates,
haddr_t * candidates_list_ptr,
int mpi_rank,
int mpi_size)
{
- hbool_t first_flush = FALSE;
+ hbool_t restart_scan;
+ hbool_t prev_is_dirty;
int i;
int m;
int n;
@@ -457,6 +525,7 @@ H5C_apply_candidate_list(H5F_t * f,
int * candidate_assignment_table = NULL;
haddr_t addr;
H5C_cache_entry_t * clear_ptr = NULL;
+ H5C_cache_entry_t * next_ptr = NULL;
H5C_cache_entry_t * entry_ptr = NULL;
H5C_cache_entry_t * flush_ptr = NULL;
H5C_cache_entry_t * delayed_ptr = NULL;
@@ -634,16 +703,29 @@ H5C_apply_candidate_list(H5F_t * f,
* should be reworked to account for additional cases.
* ===================================================================== */
+ HDassert(entries_to_flush >= 0);
+
+ restart_scan = FALSE;
entries_examined = 0;
initial_list_len = cache_ptr->LRU_list_len;
entry_ptr = cache_ptr->LRU_tail_ptr;
/* Examine each entry in the LRU list */
- while((entry_ptr != NULL) && (entries_examined <= initial_list_len) &&
- ((entries_cleared + entries_flushed) < num_candidates)) {
+ while ( ( entry_ptr != NULL )
+ &&
+ ( entries_examined <= (entries_to_flush + 1) * initial_list_len )
+ &&
+ ( (entries_cleared + entries_flushed) < num_candidates ) ) {
+
+ if ( entry_ptr->prev != NULL )
+ prev_is_dirty = entry_ptr->prev->is_dirty;
/* If this process needs to clear this entry. */
if(entry_ptr->clear_on_unprotect) {
+
+ HDassert(entry_ptr->is_dirty);
+
+ next_ptr = entry_ptr->next;
entry_ptr->clear_on_unprotect = FALSE;
clear_ptr = entry_ptr;
entry_ptr = entry_ptr->prev;
@@ -654,20 +736,26 @@ H5C_apply_candidate_list(H5F_t * f,
(long long)clear_ptr->addr);
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
+ /* No need to check for the next entry in the scan being
+ * removed from the cache, as this call to H5C_flush_single_entry()
+ * will not call either the pre_serialize or serialize callbacks.
+ */
+
if(H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- clear_ptr->type,
+ dxpl_id,
clear_ptr->addr,
H5C__FLUSH_CLEAR_ONLY_FLAG,
- &first_flush,
- TRUE) < 0)
+ TRUE,
+ NULL) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.")
} /* end if */
/* Else, if this process needs to flush this entry. */
else if (entry_ptr->flush_immediately) {
+ HDassert(entry_ptr->is_dirty);
+
+ next_ptr = entry_ptr->next;
entry_ptr->flush_immediately = FALSE;
flush_ptr = entry_ptr;
entry_ptr = entry_ptr->prev;
@@ -678,22 +766,94 @@ H5C_apply_candidate_list(H5F_t * f,
(long long)flush_ptr->addr);
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
+ /* reset entries_removed_counter and
+ * last_entry_removed_ptr prior to the call to
+ * H5C_flush_single_entry() so that we can spot
+ * unexpected removals of entries from the cache,
+ * and set the restart_scan flag if proceeding
+ * would be likely to cause us to scan an entry
+ * that is no longer in the cache.
+ *
+ * Note that as of this writing (April 2015) this
+ * case cannot occur in the parallel case. However
+ * Quincey is making noises about changing this, hence
+ * the insertion of this test.
+ *
+ * Note also that there is no test code to verify
+ * that this code actually works (although similar code
+ * in the serial version exists and is tested).
+ *
+ * Implementing a test will likely require implementing
+ * flush-op-like facilities in the parallel tests.  At
+ * a guess this will not be terribly painful, but it
+ * will take a bit of time.
+ */
+ cache_ptr->entries_removed_counter = 0;
+ cache_ptr->last_entry_removed_ptr = NULL;
+
if(H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- flush_ptr->type,
+ dxpl_id,
flush_ptr->addr,
H5C__NO_FLAGS_SET,
- &first_flush,
- TRUE) < 0)
+ TRUE,
+ NULL) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't flush entry.")
+
+ if ( ( cache_ptr->entries_removed_counter > 1 ) ||
+ ( cache_ptr->last_entry_removed_ptr == entry_ptr ) )
+
+ restart_scan = TRUE;
+
} /* end else-if */
/* Otherwise, no action to be taken on this entry. Grab the next. */
else {
entry_ptr = entry_ptr->prev;
+
+ if ( entry_ptr != NULL )
+ next_ptr = entry_ptr->next;
+
} /* end else */
+ if ( ( entry_ptr != NULL )
+ &&
+ ( ( restart_scan )
+ ||
+ ( entry_ptr->is_dirty != prev_is_dirty )
+ ||
+ ( entry_ptr->next != next_ptr )
+ ||
+ ( entry_ptr->is_protected )
+ ||
+ ( entry_ptr->is_pinned )
+ )
+ ) {
+
+ /* something has happened to the LRU -- start over
+ * from the tail.
+ *
+ * Recall that this code should be un-reachable at present,
+ * as all the operations by entries on flush that could cause
+ * it to be reachable are disallowed in the parallel case at
+ * present. Hence the following assertion which should be
+ * removed if the above changes.
+ */
+
+ HDassert( ! restart_scan );
+ HDassert( entry_ptr->is_dirty == prev_is_dirty );
+ HDassert( entry_ptr->next == next_ptr );
+ HDassert( ! entry_ptr->is_protected );
+ HDassert( ! entry_ptr->is_pinned );
+
+ HDassert(FALSE); /* see comment above */
+
+ restart_scan = FALSE;
+ entry_ptr = cache_ptr->LRU_tail_ptr;
+/*
+ H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
+*/
+ }
+
entries_examined++;
} /* end while */
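The scan above records the state of the surrounding entries before each flush and, if the flush's pre_serialize/serialize side effects disturb the LRU, restarts from the tail. As a minimal sketch of that detect-interference-and-restart pattern (a plain doubly linked list with a hypothetical flush_node(), not the cache's LRU):

#include <stdbool.h>
#include <stdio.h>

typedef struct node {
    struct node *next, *prev;
    bool dirty;
    int  value;
} node;

/* Hypothetical per-node action; a real flush could remove or dirty
 * other nodes, which is exactly what the caller must detect. */
static void flush_node(node *n) { n->dirty = false; }

/* Walk a list from the tail, flushing dirty nodes.  Before acting on a
 * node, remember what its neighbour looked like; if it changed as a
 * side effect of the action, restart the scan from the tail. */
static void scan_and_flush(node *tail)
{
    node *cur = tail;

    while(cur != NULL) {
        node *expected_prev  = cur->prev;
        bool  prev_was_dirty = expected_prev ? expected_prev->dirty : false;

        if(cur->dirty)
            flush_node(cur);

        /* Something rearranged the list out from under us? */
        if(expected_prev != cur->prev ||
                (expected_prev && expected_prev->dirty != prev_was_dirty)) {
            cur = tail;      /* restart from the tail */
            continue;
        }
        cur = cur->prev;
    }
}

int main(void)
{
    node a = { NULL, NULL, true, 1 }, b = { NULL, NULL, false, 2 };
    a.next = &b; b.prev = &a;
    scan_and_flush(&b);
    printf("a.dirty=%d b.dirty=%d\n", a.dirty, b.dirty);
    return 0;
}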
@@ -705,6 +865,30 @@ H5C_apply_candidate_list(H5F_t * f,
/* It is also possible that some of the cleared entries are on the
* pinned list. Must scan that also.
+ *
+ * WARNING:
+ *
+ * As we now allow unpinning, and removal of other entries as a side
+ * effect of flushing an entry, it is possible that the next entry
+ * in a PEL scan could either be no longer pinned, or no longer in
+ * the cache by the time we get to it.
+ *
+ * At present, this is not possible in this case, as we disallow such
+ * operations in the parallel version of the library. However, Quincey
+ * has been making noises about relaxing this. If and when he does,
+ * we have a potential problem here.
+ *
+ * The same issue exists in the serial cache, and there are tests
+ * to detect this problem when it occurs, and adjust to it. As seen
+ * above in the LRU scan, I have ported such tests to the parallel
+ * code where a close cognate exists in the serial code.
+ *
+ * I haven't done so here, as there are no PEL scans where the problem
+ * can occur in the serial code. Needless to say, this will have to
+ * be repaired if the constraints on pre_serialize and serialize
+ * callbacks are relaxed in the parallel version of the metadata cache.
+ *
+ * JRM -- 4/1/15
*/
#if H5C_APPLY_CANDIDATE_LIST__DEBUG
@@ -772,14 +956,12 @@ H5C_apply_candidate_list(H5F_t * f,
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
if(H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- clear_ptr->type,
- clear_ptr->addr,
- H5C__FLUSH_CLEAR_ONLY_FLAG,
- &first_flush,
- TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.")
+ dxpl_id,
+ clear_ptr->addr,
+ H5C__FLUSH_CLEAR_ONLY_FLAG,
+ TRUE,
+ NULL) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.")
} /* end else-if */
/* Else, if this process needs to independently flush this entry. */
@@ -795,14 +977,12 @@ H5C_apply_candidate_list(H5F_t * f,
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
if(H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- flush_ptr->type,
- flush_ptr->addr,
- H5C__NO_FLAGS_SET,
- &first_flush,
- TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't flush entry.")
+ dxpl_id,
+ flush_ptr->addr,
+ H5C__NO_FLAGS_SET,
+ TRUE,
+ NULL) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't flush entry.")
} /* end else-if */
} /* end if */
@@ -843,13 +1023,11 @@ H5C_apply_candidate_list(H5F_t * f,
} /* end if */
if(H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- delayed_ptr->type,
+ dxpl_id,
delayed_ptr->addr,
H5C__NO_FLAGS_SET,
- &first_flush,
- TRUE) < 0)
+ TRUE,
+ NULL) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
"Can't flush entry collectively.")
@@ -1184,6 +1362,9 @@ H5C_create(size_t max_cache_size,
/* Tagging Field Initializations */
cache_ptr->ignore_tags = FALSE;
+ cache_ptr->slist_changed = FALSE;
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
cache_ptr->slist_len = 0;
cache_ptr->slist_size = (size_t)0;
@@ -1197,6 +1378,9 @@ H5C_create(size_t max_cache_size,
(cache_ptr->index)[i] = NULL;
}
+ cache_ptr->entries_removed_counter = 0;
+ cache_ptr->last_entry_removed_ptr = NULL;
+
cache_ptr->pl_len = 0;
cache_ptr->pl_size = (size_t)0;
cache_ptr->pl_head_ptr = NULL;
@@ -1271,11 +1455,8 @@ H5C_create(size_t max_cache_size,
/* Set non-zero/FALSE/NULL fields for epoch markers */
for ( i = 0; i < H5C__MAX_EPOCH_MARKERS; i++ )
{
- (cache_ptr->epoch_marker_active)[i] = FALSE;
-#ifndef NDEBUG
((cache_ptr->epoch_markers)[i]).magic =
H5C__H5C_CACHE_ENTRY_T_MAGIC;
-#endif /* NDEBUG */
((cache_ptr->epoch_markers)[i]).addr = (haddr_t)i;
((cache_ptr->epoch_markers)[i]).type = &epoch_marker_class;
}
@@ -1530,9 +1711,7 @@ H5C_def_auto_resize_rpt_fcn(H5C_t * cache_ptr,
*-------------------------------------------------------------------------
*/
herr_t
-H5C_dest(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id)
+H5C_dest(H5F_t * f, hid_t dxpl_id)
{
H5C_t * cache_ptr = f->shared->cache;
herr_t ret_value = SUCCEED; /* Return value */
@@ -1544,8 +1723,7 @@ H5C_dest(H5F_t * f,
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
/* Flush and invalidate all cache entries */
- if(H5C_flush_invalidate_cache(f, primary_dxpl_id, secondary_dxpl_id,
- H5C__NO_FLAGS_SET) < 0 )
+ if(H5C_flush_invalidate_cache(f, dxpl_id, H5C__NO_FLAGS_SET) < 0 )
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")
if(cache_ptr->slist_ptr != NULL) {
@@ -1570,7 +1748,9 @@ H5C_dest(H5F_t * f,
#endif /* H5C_DO_SANITY_CHECKS */
#endif /* NDEBUG */
+#ifndef NDEBUG
cache_ptr->magic = 0;
+#endif /* NDEBUG */
cache_ptr = H5FL_FREE(H5C_t, cache_ptr);
@@ -1595,17 +1775,16 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5C_expunge_entry(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- const H5C_class_t * type,
- haddr_t addr,
- unsigned flags)
+H5C_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type,
+ haddr_t addr, unsigned flags)
{
H5C_t * cache_ptr;
- herr_t result;
- hbool_t first_flush = TRUE;
H5C_cache_entry_t * entry_ptr = NULL;
+ unsigned flush_flags = (H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG);
+#if H5C_DO_SANITY_CHECKS
+ hbool_t entry_was_dirty;
+ hsize_t entry_size;
+#endif /* H5C_DO_SANITY_CHECKS */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -1616,8 +1795,6 @@ H5C_expunge_entry(H5F_t * f,
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(type);
- HDassert(type->clear);
- HDassert(type->dest);
HDassert(H5F_addr_defined(addr));
#if H5C_DO_EXTREME_SANITY_CHECKS
@@ -1641,27 +1818,34 @@ H5C_expunge_entry(H5F_t * f,
if(entry_ptr->is_pinned)
HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is pinned.")
- /* Pass along 'free file space' flag to cache client */
- entry_ptr->free_file_space_on_destroy = ( (flags & H5C__FREE_FILE_SPACE_FLAG) != 0 );
-
/* If we get this far, call H5C_flush_single_entry() with the
* H5C__FLUSH_INVALIDATE_FLAG and the H5C__FLUSH_CLEAR_ONLY_FLAG.
* This will clear the entry, and then delete it from the cache.
*/
- result = H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- entry_ptr->type,
- entry_ptr->addr,
- H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG,
- &first_flush,
- TRUE);
- if ( result < 0 ) {
+ /* Pass along 'free file space' flag to cache client. */
+ flush_flags |= (flags & H5C__FREE_FILE_SPACE_FLAG);
+
+#if H5C_DO_SANITY_CHECKS
+ entry_was_dirty = entry_ptr->is_dirty;
+ entry_size = entry_ptr->size;
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ if(H5C_flush_single_entry(f, dxpl_id, entry_ptr->addr, flush_flags, TRUE, NULL) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "H5C_flush_single_entry() failed.")
- HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, \
- "H5C_flush_single_entry() failed.")
+#if H5C_DO_SANITY_CHECKS
+ if ( entry_was_dirty )
+ {
+ /* we have just removed an entry from the skip list. Thus
+ * we must touch up cache_ptr->slist_len_increase and
+ * cache_ptr->slist_size_increase to keep from skewing
+ * the sanity checks on flushes.
+ */
+ cache_ptr->slist_len_increase -= 1;
+ cache_ptr->slist_size_increase -= (int64_t)(entry_size);
}
+#endif /* H5C_DO_SANITY_CHECKS */
done:
#if H5C_DO_EXTREME_SANITY_CHECKS
@@ -1698,10 +1882,24 @@ done:
* Programmer: John Mainzer
* 6/2/04
*
+ * Changes:     Modified function to test for slist changes in the
+ *        pre_serialize and serialize callbacks, and restart
+ *        scans through the slist when such changes occur.
+ *
+ * This has been a potential problem for some time,
+ * and there has been code in this function to deal
+ * with elements of this issue. However the shift
+ * to the V3 cache in combination with the activities
+ * of some of the cache clients (in particular the
+ * free space manager and the fractal heap) have
+ * made this re-work necessary.
+ *
+ * JRM -- 12/13/14
+ *
*-------------------------------------------------------------------------
*/
herr_t
-H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsigned flags)
+H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
{
H5C_t * cache_ptr = f->shared->cache;
herr_t status;
@@ -1709,19 +1907,23 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign
hbool_t destroy;
hbool_t flushed_entries_last_pass;
hbool_t flush_marked_entries;
- hbool_t first_flush = TRUE;
hbool_t ignore_protected;
hbool_t tried_to_flush_protected_entry = FALSE;
+ hbool_t restart_slist_scan;
int32_t passes = 0;
int32_t protected_entries = 0;
H5SL_node_t * node_ptr = NULL;
H5C_cache_entry_t * entry_ptr = NULL;
H5C_cache_entry_t * next_entry_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
- int64_t flushed_entries_count;
- size_t flushed_entries_size;
- int64_t initial_slist_len;
- size_t initial_slist_size;
+ int64_t flushed_entries_count = 0;
+ int64_t flushed_entries_size = 0;
+ int64_t initial_slist_len = 0;
+ size_t initial_slist_size = 0;
+ int64_t entry_size_change;
+ int64_t * entry_size_change_ptr = &entry_size_change;
+#else /* H5C_DO_SANITY_CHECKS */
+ int64_t * entry_size_change_ptr = NULL;
#endif /* H5C_DO_SANITY_CHECKS */
FUNC_ENTER_NOAPI(FAIL)
@@ -1758,10 +1960,7 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign
if ( destroy ) {
- status = H5C_flush_invalidate_cache(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- flags);
+ status = H5C_flush_invalidate_cache(f, dxpl_id, flags);
if ( status < 0 ) {
@@ -1780,6 +1979,18 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign
flushed_entries_last_pass = TRUE;
+ /* set the cache_ptr->slist_change_in_pre_serialize and
+ * cache_ptr->slist_change_in_serialize to false.
+ *
+ * These flags are set to TRUE by H5C_flush_single_entry if the
+ * slist is modified by a pre_serialize or serialize call respectively.
+ * H5C_flush_cache uses these flags to detect any modifications
+ * to the slist that might corrupt the scan of the slist -- and
+ * restart the scan in this event.
+ */
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
+
while ( ( passes < H5C__MAX_PASSES_ON_FLUSH ) &&
( cache_ptr->slist_len != 0 ) &&
( protected_entries == 0 ) &&
@@ -1797,18 +2008,6 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign
{
hbool_t flushed_during_dep_loop = FALSE;
- /* Start at beginning of skip list each time */
- node_ptr = H5SL_first(cache_ptr->slist_ptr);
- HDassert( node_ptr != NULL );
-
- /* Get cache entry for this node */
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if ( NULL == next_entry_ptr )
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
- HDassert( next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
- HDassert( next_entry_ptr->is_dirty );
- HDassert( next_entry_ptr->in_slist );
-
#if H5C_DO_SANITY_CHECKS
/* For sanity checking, try to verify that the skip list has
* the expected size and number of entries at the end of each
@@ -1818,8 +2017,10 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign
* or may not flush all the entries in the slist.
*
* To make things more entertaining, with the advent of the
- * fractal heap, the entry flush callback can cause entries
- * to be dirtied, resized, and/or moved.
+ * fractal heap, the entry serialize callback can cause entries
+ * to be dirtied, resized, and/or moved. Also, the
+ * pre_serialize callback can result in an entry being
+ * removed from the cache via the take ownership flag.
*
* To deal with this, we first make note of the initial
* skip list length and size:
@@ -1834,7 +2035,8 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign
flushed_entries_size = 0;
/* As mentioned above, there is the possibility that
- * entries will be dirtied, resized, and/or flushed during
+ * entries will be dirtied, resized, flushed, or removed
+ * from the cache via the take ownership flag during
* our pass through the skip list. To capture the number
* of entries added, and the skip list size delta,
* zero the slist_len_increase and slist_size_increase of
@@ -1851,53 +2053,60 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign
*/
#endif /* H5C_DO_SANITY_CHECKS */
- while ( node_ptr != NULL )
+ restart_slist_scan = TRUE;
+
+ while ( ( restart_slist_scan ) || ( node_ptr != NULL ) )
{
- entry_ptr = next_entry_ptr;
+ if ( restart_slist_scan )
+ {
+ restart_slist_scan = FALSE;
- /* With the advent of the fractal heap, it is possible
- * that the flush callback will dirty and/or resize
- * other entries in the cache. In particular, while
- * Quincey has promised me that this will never happen,
- * it is possible that the flush callback for an
- * entry may protect an entry that is not in the cache,
- * perhaps causing the cache to flush and possibly
- * evict the entry associated with node_ptr to make
- * space for the new entry.
- *
- * Thus we do a bit of extra sanity checking on entry_ptr,
- * and break out of this scan of the skip list if we
- * detect minor problems. We have a bit of leaway on the
- * number of passes though the skip list, so this shouldn't
- * be an issue in the flush in and of itself, as it should
- * be all but impossible for this to happen more than once
- * in any flush.
- *
- * Observe that that breaking out of the scan early
- * shouldn't break the sanity checks just after the end
- * of this while loop.
- *
- * If an entry has merely been marked clean and removed from
- * the s-list, we simply break out of the scan.
- *
- * If the entry has been evicted, we flag an error and
- * exit.
- */
-#ifndef NDEBUG
- if ( entry_ptr->magic != H5C__H5C_CACHE_ENTRY_T_MAGIC ) {
+ /* Start at beginning of skip list */
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry_ptr->magic is invalid ?!?!")
+ if ( node_ptr == NULL )
+ {
+ /* the slist is empty -- break out of inner loop */
+ break;
+ }
+ HDassert( node_ptr != NULL );
- } else
-#endif /* NDEBUG */
- if ( ( ! entry_ptr->is_dirty ) ||
- ( ! entry_ptr->in_slist ) ) {
+ /* Get cache entry for this node */
+ next_entry_ptr =
+ (H5C_cache_entry_t *)H5SL_item(node_ptr);
- /* the s-list has been modified out from under us.
- * break out of the loop.
- */
- goto end_of_inner_loop;;
+ if(NULL == next_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "next_entry_ptr == NULL ?!?!")
+
+ HDassert( next_entry_ptr->magic == \
+ H5C__H5C_CACHE_ENTRY_T_MAGIC );
+ HDassert( next_entry_ptr->is_dirty );
+ HDassert( next_entry_ptr->in_slist );
}
+
+ entry_ptr = next_entry_ptr;
+
+ /* With the advent of the fractal heap, the free space
+ * manager, and the version 3 cache, it is possible
+ * that the pre-serialize or serialize callback will
+ * dirty, resize, or take ownership of other entries
+ * in the cache.
+ *
+ * To deal with this, I have inserted code to detect any
+ * change in the skip list not directly under the control
+ * of this function. If such modifications are detected,
+ * we must re-start the scan of the skip list to avoid
+ * the possibility that the target of the next_entry_ptr
+ * may have been flushed or deleted from the cache.
+ *
+ * To verify that all such possibilities have been dealt
+ * with, we do a bit of extra sanity checking on
+ * entry_ptr.
+ */
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->in_slist);
+ HDassert(entry_ptr->is_dirty);
/* increment node pointer now, before we delete its target
* from the slist.
@@ -1911,6 +2120,7 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign
HDassert( next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
HDassert( next_entry_ptr->is_dirty );
HDassert( next_entry_ptr->in_slist );
+ HDassert( entry_ptr != next_entry_ptr );
} else {
next_entry_ptr = NULL;
}
@@ -1927,9 +2137,9 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign
if ( entry_ptr->is_protected ) {
- /* we probably have major problems -- but lets flush
- * everything we can before we decide whether to flag
- * an error.
+ /* we probably have major problems -- but let's
+ * flush everything we can before we decide
+ * whether to flag an error.
*/
tried_to_flush_protected_entry = TRUE;
protected_entries++;
@@ -1943,16 +2153,15 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign
if(entry_ptr->flush_dep_height == curr_flush_dep_height ) {
#if H5C_DO_SANITY_CHECKS
flushed_entries_count++;
- flushed_entries_size += entry_ptr->size;
+ flushed_entries_size += (int64_t)entry_ptr->size;
+ entry_size_change = 0;
#endif /* H5C_DO_SANITY_CHECKS */
status = H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- NULL,
- entry_ptr->addr,
- flags,
- &first_flush,
- FALSE);
+ dxpl_id,
+ entry_ptr->addr,
+ flags,
+ FALSE,
+ entry_size_change_ptr);
if ( status < 0 ) {
/* This shouldn't happen -- if it does,
@@ -1961,7 +2170,35 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
"dirty pinned entry flush failed.")
} /* end if */
+
+#if H5C_DO_SANITY_CHECKS
+ /* it is possible that the entry size changed
+ * during flush -- update flushed_entries_size
+ * to account for this.
+ */
+ flushed_entries_size += entry_size_change;
+#endif /* H5C_DO_SANITY_CHECKS */
+
flushed_during_dep_loop = TRUE;
+
+ if ((cache_ptr->slist_change_in_serialize) ||
+ (cache_ptr->slist_change_in_pre_serialize))
+ {
+ /* The slist has been modified by something
+ * other than the simple removal of the
+ * flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_change_in_pre_serialize
+ = FALSE;
+ cache_ptr->slist_change_in_serialize
+ = FALSE;
+
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
+ }
} /* end if */
else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
/* This shouldn't happen -- if it does, just scream and die. */
@@ -1976,16 +2213,15 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign
if(entry_ptr->flush_dep_height == curr_flush_dep_height ) {
#if H5C_DO_SANITY_CHECKS
flushed_entries_count++;
- flushed_entries_size += entry_ptr->size;
+ flushed_entries_size += (int64_t)entry_ptr->size;
+ entry_size_change = 0;
#endif /* H5C_DO_SANITY_CHECKS */
status = H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- NULL,
- entry_ptr->addr,
- flags,
- &first_flush,
- FALSE);
+ dxpl_id,
+ entry_ptr->addr,
+ flags,
+ FALSE,
+ entry_size_change_ptr);
if ( status < 0 ) {
/* This shouldn't happen -- if it does,
@@ -1994,14 +2230,42 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
"Can't flush entry.")
}
+
+#if H5C_DO_SANITY_CHECKS
+ /* it is possible that the entry size changed
+ * during flush -- update flushed_entries_size
+ * to account for this.
+ */
+ flushed_entries_size += entry_size_change;
+#endif /* H5C_DO_SANITY_CHECKS */
+
flushed_during_dep_loop = TRUE;
+
+ if ((cache_ptr->slist_change_in_serialize) ||
+ (cache_ptr->slist_change_in_pre_serialize))
+ {
+ /* The slist has been modified by something
+ * other than the simple removal of the
+ * flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_change_in_pre_serialize
+ = FALSE;
+ cache_ptr->slist_change_in_serialize
+ = FALSE;
+
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
+ }
} /* end if */
else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
/* This shouldn't happen -- if it does, just scream and die. */
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
} /* end else */
} /* end if */
- } /* while ( node_ptr != NULL ) */
+ } /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */
/* Check for incrementing flush dependency height */
if(flushed_during_dep_loop) {
@@ -2020,20 +2284,19 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign
curr_flush_dep_height++;
} /* while ( curr_flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS) */
-end_of_inner_loop:
+
+ passes++;
#if H5C_DO_SANITY_CHECKS
/* Verify that the slist size and length are as expected. */
HDassert( (initial_slist_len + cache_ptr->slist_len_increase -
flushed_entries_count) == cache_ptr->slist_len );
- HDassert( (initial_slist_size +
- (size_t)(cache_ptr->slist_size_increase) -
+ HDassert( (size_t)((int64_t)initial_slist_size +
+ cache_ptr->slist_size_increase -
flushed_entries_size) == cache_ptr->slist_size );
#endif /* H5C_DO_SANITY_CHECKS */
- passes++;
-
} /* while */
HDassert( protected_entries <= cache_ptr->pl_len );
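The sanity check just above asserts that, after each pass, the skip list length and size equal the initial values plus whatever the pre_serialize/serialize callbacks added (slist_len_increase, slist_size_increase) minus what this function itself flushed. A tiny worked example of that bookkeeping, with made-up numbers rather than values from the source:

#include <assert.h>
#include <stdint.h>

/* Hypothetical numbers: start with 10 slist entries totalling 4096 bytes;
 * callbacks add 2 entries (512 bytes); this pass flushes 6 entries
 * (2048 bytes).  The observed slist state must match the arithmetic. */
int main(void)
{
    int64_t initial_slist_len   = 10,   slist_len_increase  = 2,   flushed_entries_count = 6;
    int64_t initial_slist_size  = 4096, slist_size_increase = 512, flushed_entries_size  = 2048;

    int64_t slist_len  = initial_slist_len  + slist_len_increase  - flushed_entries_count; /* 6 */
    int64_t slist_size = initial_slist_size + slist_size_increase - flushed_entries_size;  /* 2560 */

    assert(slist_len == 6);
    assert(slist_size == 2560);
    return 0;
}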
@@ -2098,12 +2361,10 @@ done:
*/
herr_t
H5C_flush_to_min_clean(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id)
+ hid_t dxpl_id)
{
H5C_t * cache_ptr;
herr_t result;
- hbool_t first_flush = TRUE;
hbool_t write_permitted;
#if 0 /* modified code -- commented out for now */
int i;
@@ -2127,9 +2388,7 @@ H5C_flush_to_min_clean(H5F_t * f,
if ( cache_ptr->check_write_permitted != NULL ) {
- result = (cache_ptr->check_write_permitted)(f,
- primary_dxpl_id,
- &write_permitted);
+ result = (cache_ptr->check_write_permitted)(f, &write_permitted);
if ( result < 0 ) {
@@ -2148,11 +2407,9 @@ H5C_flush_to_min_clean(H5F_t * f,
}
#if 1 /* original code */
result = H5C_make_space_in_cache(f,
- primary_dxpl_id,
- secondary_dxpl_id,
+ dxpl_id,
(size_t)0,
- write_permitted,
- &first_flush);
+ write_permitted);
if ( result < 0 ) {
@@ -2569,25 +2826,23 @@ done:
* file logging is turned off), or contain a pointer to the
* open file to which trace file data is to be written.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Trace file pointer (may be NULL when trace file logging is turned off; cannot fail)
*
* Programmer: John Mainzer
* 1/20/06
*
*-------------------------------------------------------------------------
*/
-herr_t
-H5C_get_trace_file_ptr(const H5C_t *cache_ptr, FILE **trace_file_ptr_ptr)
+FILE *
+H5C_get_trace_file_ptr(const H5C_t *cache_ptr)
{
FUNC_ENTER_NOAPI_NOERR
+ /* Check arguments */
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(trace_file_ptr_ptr);
-
- *trace_file_ptr_ptr = cache_ptr->trace_file_ptr;
- FUNC_LEAVE_NOAPI(SUCCEED)
+ FUNC_LEAVE_NOAPI(cache_ptr->trace_file_ptr)
} /* H5C_get_trace_file_ptr() */
@@ -2600,16 +2855,15 @@ H5C_get_trace_file_ptr(const H5C_t *cache_ptr, FILE **trace_file_ptr_ptr)
* file logging is turned off), or contain a pointer to the
* open file to which trace file data is to be written.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Trace file pointer (may be NULL when trace file logging is turned off; cannot fail)
*
* Programmer: Quincey Koziol
* 6/9/08
*
*-------------------------------------------------------------------------
*/
-herr_t
-H5C_get_trace_file_ptr_from_entry(const H5C_cache_entry_t *entry_ptr,
- FILE **trace_file_ptr_ptr)
+FILE *
+H5C_get_trace_file_ptr_from_entry(const H5C_cache_entry_t *entry_ptr)
{
FUNC_ENTER_NOAPI_NOERR
@@ -2617,9 +2871,7 @@ H5C_get_trace_file_ptr_from_entry(const H5C_cache_entry_t *entry_ptr,
HDassert(entry_ptr);
HDassert(entry_ptr->cache_ptr);
- H5C_get_trace_file_ptr(entry_ptr->cache_ptr, trace_file_ptr_ptr);
-
- FUNC_LEAVE_NOAPI(SUCCEED)
+ FUNC_LEAVE_NOAPI(H5C_get_trace_file_ptr(entry_ptr->cache_ptr))
} /* H5C_get_trace_file_ptr_from_entry() */
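Both trace-file accessors now return the FILE pointer directly instead of filling an out-parameter and returning a status code, which simplifies call sites. A hedged caller-side sketch using a hypothetical get_trace_file() (not the real H5C API):

#include <stdio.h>

/* Hypothetical module state and accessor mirroring the new style: return
 * the pointer itself (possibly NULL) rather than a status code plus an
 * out-parameter. */
static FILE *module_trace_file = NULL;

static FILE *get_trace_file(void)
{
    return module_trace_file;   /* may be NULL when tracing is disabled */
}

int main(void)
{
    /* New style: one call, test the returned pointer directly. */
    FILE *trace = get_trace_file();

    if(trace != NULL)
        fprintf(trace, "tracing enabled\n");
    else
        printf("tracing disabled\n");
    return 0;
}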
@@ -2652,8 +2904,7 @@ H5C_get_trace_file_ptr_from_entry(const H5C_cache_entry_t *entry_ptr,
*/
herr_t
H5C_insert_entry(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
+ hid_t dxpl_id,
const H5C_class_t * type,
haddr_t addr,
void * thing,
@@ -2661,7 +2912,6 @@ H5C_insert_entry(H5F_t * f,
{
H5C_t * cache_ptr;
herr_t result;
- hbool_t first_flush = TRUE;
hbool_t insert_pinned;
hbool_t flush_last;
#ifdef H5_HAVE_PARALLEL
@@ -2685,8 +2935,6 @@ H5C_insert_entry(H5F_t * f,
HDassert( cache_ptr );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
HDassert( type );
- HDassert( type->flush );
- HDassert( type->size );
HDassert( H5F_addr_defined(addr) );
HDassert( thing );
@@ -2725,15 +2973,16 @@ H5C_insert_entry(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "duplicate entry in cache.")
} /* end if */
-#ifndef NDEBUG
entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
-#endif /* NDEBUG */
entry_ptr->cache_ptr = cache_ptr;
entry_ptr->addr = addr;
entry_ptr->type = type;
+ entry_ptr->image_ptr = NULL;
+ entry_ptr->image_up_to_date = FALSE;
+
/* Apply tag to newly inserted entry */
- if(H5C_tag_entry(cache_ptr, entry_ptr, primary_dxpl_id) < 0)
+ if(H5C_tag_entry(cache_ptr, entry_ptr, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry")
entry_ptr->is_protected = FALSE;
@@ -2754,10 +3003,27 @@ H5C_insert_entry(H5F_t * f,
/* not protected, so can't be dirtied */
entry_ptr->dirtied = FALSE;
- /* Retrieve the size of the thing */
- if((type->size)(f, thing, &(entry_ptr->size)) < 0)
+ /* Retrieve the size of the thing. Set the compressed field to FALSE
+ * and the compressed_size field to zero first, as they may not be
+ * initialized by the image_len call.
+ */
+ entry_ptr->compressed = FALSE;
+ entry_ptr->compressed_size = 0;
+ if((type->image_len)(thing, &(entry_ptr->size), &(entry_ptr->compressed),
+ &(entry_ptr->compressed_size)) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGETSIZE, FAIL, "Can't get size of thing")
HDassert(entry_ptr->size > 0 && entry_ptr->size < H5C_MAX_ENTRY_SIZE);
+ HDassert(((type->flags & H5C__CLASS_COMPRESSED_FLAG) != 0) ||
+ (entry_ptr->compressed == FALSE));
+
+ /* entry has just been inserted -- thus compressed size cannot have
+ * been computed yet. Thus if entry_ptr->compressed is TRUE,
+ * entry_ptr->size must equal entry_ptr->compressed_size.
+ */
+ HDassert((entry_ptr->compressed == FALSE) ||
+ (entry_ptr->size == entry_ptr->compressed_size));
+ HDassert((entry_ptr->compressed == TRUE) ||
+ (entry_ptr->compressed_size == 0));
entry_ptr->in_slist = FALSE;
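The assertions above capture the insert-time contract of the new image_len callback: the client reports the on-disk image size and, for compressible clients, whether the entry is compressed and its compressed size; since a just-inserted entry cannot yet have a separately computed compressed size, compressed implies size == compressed_size, and not-compressed implies compressed_size == 0. A small sketch of that contract with hypothetical types:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical entry descriptor holding only the fields the insert-time
 * assertions above talk about. */
typedef struct toy_entry {
    size_t size;
    bool   compressed;
    size_t compressed_size;
} toy_entry;

/* Check the invariants the cache asserts after calling the client's
 * image_len callback for a newly inserted entry. */
static void check_insert_invariants(const toy_entry *e)
{
    assert(e->size > 0);
    /* compressed entries report their compressed size as the size ... */
    assert(!e->compressed || e->size == e->compressed_size);
    /* ... and uncompressed entries report no compressed size at all. */
    assert(e->compressed || e->compressed_size == 0);
}

int main(void)
{
    toy_entry plain      = { 512, false, 0 };
    toy_entry compressed = { 200, true, 200 };

    check_insert_invariants(&plain);
    check_insert_invariants(&compressed);
    return 0;
}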
@@ -2768,7 +3034,6 @@ H5C_insert_entry(H5F_t * f,
entry_ptr->flush_in_progress = FALSE;
entry_ptr->destroy_in_progress = FALSE;
- entry_ptr->free_file_space_on_destroy = FALSE;
/* Initialize flush dependency height fields */
entry_ptr->flush_dep_parent = NULL;
@@ -2820,9 +3085,7 @@ H5C_insert_entry(H5F_t * f,
if ( cache_ptr->check_write_permitted != NULL ) {
- result = (cache_ptr->check_write_permitted)(f,
- primary_dxpl_id,
- &write_permitted);
+ result = (cache_ptr->check_write_permitted)(f, &write_permitted);
if ( result < 0 ) {
@@ -2869,11 +3132,9 @@ H5C_insert_entry(H5F_t * f,
*/
result = H5C_make_space_in_cache(f,
- primary_dxpl_id,
- secondary_dxpl_id,
+ dxpl_id,
space_needed,
- write_permitted,
- &first_flush);
+ write_permitted);
if ( result < 0 ) {
@@ -2965,18 +3226,39 @@ done:
* Programmer: John Mainzer
* 7/5/05
*
+ * Changes:     Tidied up the code, removing some old commented-out
+ *        code that had been left in place pending the success of
+ *        the new version.
+ *
+ *        Note that unlike H5C_apply_candidate_list(),
+ *        H5C_mark_entries_as_clean() makes all its calls to
+ *        H5C_flush_single_entry() with the
+ *        H5C__FLUSH_CLEAR_ONLY_FLAG set.  As a result,
+ *        the pre_serialize() and serialize calls are not made.
+ *
+ * This then implies that (assuming such actions were
+ * permitted in the parallel case) no loads, dirties,
+ * resizes, or removals of other entries can occur as
+ * a side effect of the flush. Hence, there is no need
+ * for the checks for entry removal / status change
+ * that I ported to H5C_apply_candidate_list().
+ *
+ * However, if (in addition to allowing such operations
+ * in the parallel case), we allow such operations outside
+ * of the pre_serialize / serialize routines, this may
+ * cease to be the case -- requiring a review of this
+ * function.
+ *
*-------------------------------------------------------------------------
*/
#ifdef H5_HAVE_PARALLEL
herr_t
H5C_mark_entries_as_clean(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
+ hid_t dxpl_id,
int32_t ce_array_len,
haddr_t * ce_array_ptr)
{
H5C_t * cache_ptr;
- hbool_t first_flush = TRUE;
int entries_cleared;
int entries_examined;
int i;
@@ -3070,27 +3352,9 @@ H5C_mark_entries_as_clean(H5F_t * f,
#endif /* H5C_DO_SANITY_CHECKS */
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"Listed entry not dirty?!?!?.")
-#if 0 /* original code */
- } else if ( entry_ptr->is_protected ) {
-
- entry_ptr->clear_on_unprotect = TRUE;
} else {
- if ( H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- entry_ptr->type,
- addr,
- H5C__FLUSH_CLEAR_ONLY_FLAG,
- &first_flush,
- TRUE) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.")
- }
- }
-#else /* modified code */
- } else {
/* Mark the entry to be cleared on unprotect. We will
* scan the LRU list shortly, and clear all those entries
* not currently protected.
@@ -3111,13 +3375,32 @@ H5C_mark_entries_as_clean(H5F_t * f,
}
#endif /* H5C_DO_SANITY_CHECKS */
}
-#endif /* end modified code */
}
-#if 1 /* modified code */
+
/* Scan through the LRU list from back to front, and flush the
* entries whose clear_on_unprotect flags are set. Observe that
* any protected entries will not be on the LRU, and therefore
* will not be flushed at this time.
+ *
+ * Note that unlike H5C_apply_candidate_list(),
+ * H5C_mark_entries_as_clean() makes all its calls to
+ * H5C_flush_single_entry() with the H5C__FLUSH_CLEAR_ONLY_FLAG
+ * set. As a result, the pre_serialize() and serialize calls are
+ * not made.
+ *
+ * This then implies that (assuming such actions were
+ * permitted in the parallel case) no loads, dirties,
+ * resizes, or removals of other entries can occur as
+ * a side effect of the flush. Hence, there is no need
+ * for the checks for entry removal / status change
+ * that I ported to H5C_apply_candidate_list().
+ *
+ * However, if (in addition to allowing such operations
+ * in the parallel case), we allow such operations outside
+ * of the pre_serialize / serialize routines, this may
+ * cease to be the case -- requiring a review of this
+ * point.
+ * JRM -- 4/7/15
*/
entries_cleared = 0;
@@ -3137,13 +3420,11 @@ H5C_mark_entries_as_clean(H5F_t * f,
entries_cleared++;
if ( H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- clear_ptr->type,
+ dxpl_id,
clear_ptr->addr,
H5C__FLUSH_CLEAR_ONLY_FLAG,
- &first_flush,
- TRUE) < 0 ) {
+ TRUE,
+ NULL) < 0 ) {
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.")
}
@@ -3174,13 +3455,11 @@ H5C_mark_entries_as_clean(H5F_t * f,
entries_cleared++;
if ( H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- clear_ptr->type,
+ dxpl_id,
clear_ptr->addr,
H5C__FLUSH_CLEAR_ONLY_FLAG,
- &first_flush,
- TRUE) < 0 ) {
+ TRUE,
+ NULL) < 0 ) {
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.")
}
@@ -3211,7 +3490,6 @@ H5C_mark_entries_as_clean(H5F_t * f,
}
HDassert( (entries_cleared + i) == ce_array_len );
#endif /* H5C_DO_SANITY_CHECKS */
-#endif /* modified code */
done:
@@ -3287,6 +3565,7 @@ H5C_mark_entry_dirty(void *thing)
/* mark the entry as dirty if it isn't already */
entry_ptr->is_dirty = TRUE;
+ entry_ptr->image_up_to_date = FALSE;
if ( was_pinned_unprotected_and_clean ) {
@@ -3436,36 +3715,32 @@ H5C_move_entry(H5C_t * cache_ptr,
was_dirty = entry_ptr->is_dirty;
#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
- if ( ! ( entry_ptr->flush_in_progress ) ) {
-
- entry_ptr->is_dirty = TRUE;
- }
+ entry_ptr->is_dirty = TRUE;
+ /* This shouldn't be needed, but it keeps the test code happy */
+ entry_ptr->image_up_to_date = FALSE;
H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
- if ( ! ( entry_ptr->flush_in_progress ) ) {
-
- H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
+ H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
#if H5C_DO_SANITY_CHECKS
- if ( removed_entry_from_slist ) {
-
- /* we just removed the entry from the slist. Thus we
- * must touch up cache_ptr->slist_len_increase and
- * cache_ptr->slist_size_increase to keep from skewing
- * the sanity checks.
- */
- HDassert( cache_ptr->slist_len_increase > 1 );
- HDassert( cache_ptr->slist_size_increase >
- (int64_t)(entry_ptr->size) );
+ if ( removed_entry_from_slist ) {
- cache_ptr->slist_len_increase -= 1;
- cache_ptr->slist_size_increase -= (int64_t)(entry_ptr->size);
- }
+ /* we just removed the entry from the slist. Thus we
+ * must touch up cache_ptr->slist_len_increase and
+ * cache_ptr->slist_size_increase to keep from skewing
+ * the sanity checks.
+ */
+ cache_ptr->slist_len_increase -= 1;
+ cache_ptr->slist_size_increase -= (int64_t)(entry_ptr->size);
+ }
#endif /* H5C_DO_SANITY_CHECKS */
+ if ( ! ( entry_ptr->flush_in_progress ) ) {
+
+ /* skip the update if a flush is in progress */
H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, FAIL)
}
}
@@ -3545,6 +3820,11 @@ H5C_resize_entry(void *thing, size_t new_size)
/* mark the entry as dirty if it isn't already */
entry_ptr->is_dirty = TRUE;
+ entry_ptr->image_up_to_date = FALSE;
+
+ /* Release the current image */
+ if( entry_ptr->image_ptr )
+ entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
/* do a flash cache size increase if appropriate */
if ( cache_ptr->flash_size_increase_possible ) {
@@ -3752,18 +4032,6 @@ done:
* or flushed -- nor may it be accessed by another call to
* H5C_protect. Any attempt to do so will result in a failure.
*
- * The primary_dxpl_id and secondary_dxpl_id parameters
- * specify the dxpl_ids used on the first write occasioned
- * by the insertion (primary_dxpl_id), and on all subsequent
- * writes (secondary_dxpl_id). This is useful in the
- * metadata cache, but may not be needed elsewhere. If so,
- * just use the same dxpl_id for both parameters.
- *
- * All reads are performed with the primary_dxpl_id.
- *
- * Similarly, the primary_dxpl_id is passed to the
- * check_write_permitted function if it is called.
- *
* Return: Success: Ptr to the desired entry
* Failure: NULL
*
@@ -3785,12 +4053,21 @@ done:
* entries long before we actually have to evict something
* to make space.
*
+ * JRM -- 9/1/14
+ * Replace the old rw parameter with the flags parameter.
+ * This allows H5C_protect to accept flags other than
+ * H5C__READ_ONLY_FLAG.
+ *
+ * Added support for the H5C__FLUSH_LAST_FLAG and
+ * H5C__FLUSH_COLLECTIVELY_FLAG flags. At present, these
+ * flags are only applied if the entry is not in cache, and
+ * is loaded into the cache as a result of this call.
+ *
*-------------------------------------------------------------------------
*/
void *
H5C_protect(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
+ hid_t dxpl_id,
const H5C_class_t * type,
haddr_t addr,
void * udata,
@@ -3798,9 +4075,12 @@ H5C_protect(H5F_t * f,
{
H5C_t * cache_ptr;
hbool_t hit;
- hbool_t first_flush;
hbool_t have_write_permitted = FALSE;
hbool_t read_only = FALSE;
+ hbool_t flush_last;
+#ifdef H5_HAVE_PARALLEL
+ hbool_t flush_collectively;
+#endif /* H5_HAVE_PARALLEL */
hbool_t write_permitted;
herr_t result;
size_t empty_space;
@@ -3821,8 +4101,6 @@ H5C_protect(H5F_t * f,
HDassert( cache_ptr );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
HDassert( type );
- HDassert( type->flush );
- HDassert( type->load );
HDassert( H5F_addr_defined(addr) );
#if H5C_DO_EXTREME_SANITY_CHECKS
@@ -3836,6 +4114,10 @@ H5C_protect(H5F_t * f,
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
read_only = ( (flags & H5C__READ_ONLY_FLAG) != 0 );
+ flush_last = ( (flags & H5C__FLUSH_LAST_FLAG) != 0 );
+#ifdef H5_HAVE_PARALLEL
+ flush_collectively = ( (flags & H5C__FLUSH_COLLECTIVELY_FLAG) != 0 );
+#endif /* H5_HAVE_PARALLEL */
/* first check to see if the target is in cache */
H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, NULL)
@@ -3854,7 +4136,7 @@ H5C_protect(H5F_t * f,
from disk. */
/* Get the dataset transfer property list */
- if(NULL == (dxpl = (H5P_genplist_t *)H5I_object_verify(primary_dxpl_id, H5I_GENPROP_LST)))
+ if(NULL == (dxpl = (H5P_genplist_t *)H5I_object_verify(dxpl_id, H5I_GENPROP_LST)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a property list");
/* Get the tag from the DXPL */
@@ -3880,7 +4162,7 @@ H5C_protect(H5F_t * f,
hit = FALSE;
- thing = H5C_load_entry(f, primary_dxpl_id, type, addr, udata);
+ thing = H5C_load_entry(f, dxpl_id, type, addr, udata);
if ( thing == NULL ) {
@@ -3890,7 +4172,7 @@ H5C_protect(H5F_t * f,
entry_ptr = (H5C_cache_entry_t *)thing;
/* Apply tag to newly protected entry */
- if(H5C_tag_entry(cache_ptr, entry_ptr, primary_dxpl_id) < 0)
+ if(H5C_tag_entry(cache_ptr, entry_ptr, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, NULL, "Cannot tag metadata entry")
/* If the entry is very large, and we are configured to allow it,
@@ -3934,9 +4216,7 @@ H5C_protect(H5F_t * f,
if ( cache_ptr->check_write_permitted != NULL ) {
- result = (cache_ptr->check_write_permitted)(f,
- primary_dxpl_id,
- &write_permitted);
+ result = (cache_ptr->check_write_permitted)(f, &write_permitted);
if ( result < 0 ) {
@@ -3947,7 +4227,6 @@ H5C_protect(H5F_t * f,
have_write_permitted = TRUE;
- first_flush = TRUE;
}
} else {
@@ -3955,7 +4234,6 @@ H5C_protect(H5F_t * f,
have_write_permitted = TRUE;
- first_flush = TRUE;
}
HDassert( entry_ptr->size <= H5C_MAX_ENTRY_SIZE );
@@ -3996,10 +4274,10 @@ H5C_protect(H5F_t * f,
* see no point in worrying about the fourth.
*/
- result = H5C_make_space_in_cache(f, primary_dxpl_id,
- secondary_dxpl_id,
- space_needed, write_permitted,
- &first_flush);
+ result = H5C_make_space_in_cache(f,
+ dxpl_id,
+ space_needed,
+ write_permitted);
if ( result < 0 ) {
@@ -4013,7 +4291,24 @@ H5C_protect(H5F_t * f,
*
* This is no longer true -- due to a bug fix, we may modify
* data on load to repair a file.
+ *
+ * *******************************************
+ *
+ * Set the flush_last (and possibly flush_collectively) fields
+ * of the newly loaded entry before inserting it into the
+	 * 		index.  Must do this, as the index tracks the number of
+ * entries with the flush_last field set, but assumes that
+ * the field will not change after insertion into the index.
+ *
+ * Note that this means that the H5C__FLUSH_LAST_FLAG and
+ * H5C__FLUSH_COLLECTIVELY_FLAG flags are ignored if the
+ * entry is already in cache.
*/
+ entry_ptr->flush_me_last = flush_last;
+#ifdef H5_HAVE_PARALLEL
+ entry_ptr->flush_me_collectively = flush_collectively;
+#endif
+
H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, NULL)
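The comment above states that the index counts entries with flush_me_last set and assumes the flag is final once an entry is inserted. A minimal standalone sketch of why the assignment must precede insertion, using a hypothetical linked-list index rather than the HDF5 hash table (all names are illustrative):

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical entry and index -- illustrative only, not HDF5 types. */
    struct entry {
        bool          flush_me_last;
        struct entry *next;
    };

    struct entry_index {
        struct entry *head;
        size_t        num_entries;
        size_t        num_flush_me_last;  /* maintained at insert/remove time */
    };

    /* Insertion samples the flag, so the flag must be final before this call. */
    static void index_insert(struct entry_index *idx, struct entry *e)
    {
        e->next   = idx->head;
        idx->head = e;
        idx->num_entries++;
        if (e->flush_me_last)
            idx->num_flush_me_last++;
    }

    int main(void)
    {
        struct entry_index idx = { NULL, 0, 0 };
        struct entry       e   = { false, NULL };

        e.flush_me_last = true;   /* set the flag first ...                 */
        index_insert(&idx, &e);   /* ... then insert, so the count is right */

        /* Flipping e.flush_me_last after the insert would silently
         * desynchronize idx.num_flush_me_last -- the hazard the comment
         * above warns about.
         */
        assert(idx.num_flush_me_last == 1);
        return 0;
    }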
if ( ( entry_ptr->is_dirty ) && ( ! (entry_ptr->in_slist) ) ) {
@@ -4028,11 +4323,11 @@ H5C_protect(H5F_t * f,
*/
H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, NULL)
- /* If the entry's type has a 'notify' callback send a 'after insertion'
+    /* If the entry's type has a 'notify' callback, send an 'after load'
* notice now that the entry is fully integrated into the cache.
*/
if(entry_ptr->type->notify &&
- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_INSERT, entry_ptr) < 0)
+ (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_LOAD, entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, NULL, "can't notify client about entry inserted into cache")
}
@@ -4083,9 +4378,7 @@ H5C_protect(H5F_t * f,
if ( cache_ptr->check_write_permitted != NULL ) {
- result = (cache_ptr->check_write_permitted)(f,
- primary_dxpl_id,
- &write_permitted);
+ result = (cache_ptr->check_write_permitted)(f, &write_permitted);
if ( result < 0 ) {
@@ -4096,7 +4389,6 @@ H5C_protect(H5F_t * f,
have_write_permitted = TRUE;
- first_flush = TRUE;
}
} else {
@@ -4104,7 +4396,6 @@ H5C_protect(H5F_t * f,
have_write_permitted = TRUE;
- first_flush = TRUE;
}
}
@@ -4113,10 +4404,8 @@ H5C_protect(H5F_t * f,
(cache_ptr->resize_ctl).epoch_length ) ) {
result = H5C__auto_adjust_cache_size(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- write_permitted,
- &first_flush);
+ dxpl_id,
+ write_permitted);
if ( result != SUCCEED ) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
@@ -4150,10 +4439,10 @@ H5C_protect(H5F_t * f,
if(cache_ptr->index_size > cache_ptr->max_cache_size)
cache_ptr->cache_full = TRUE;
- result = H5C_make_space_in_cache(f, primary_dxpl_id,
- secondary_dxpl_id,
- (size_t)0, write_permitted,
- &first_flush);
+ result = H5C_make_space_in_cache(f,
+ dxpl_id,
+ (size_t)0,
+ write_permitted);
if ( result < 0 ) {
@@ -4649,6 +4938,10 @@ done:
* total_entries_skipped_in_msic, total_entries_scanned_in_msic,
* and max_entries_skipped_in_msic fields.
*
+ * JRM -- 4/11/15
+ * Added code displaying the new slist_scan_restarts,
+ *		LRU_scan_restarts, and hash_bucket_scan_restarts fields.
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -4660,8 +4953,6 @@ H5C_stats(H5C_t * cache_ptr,
#endif /* H5C_COLLECT_CACHE_STATS */
display_detailed_stats)
{
- herr_t ret_value = SUCCEED; /* Return value */
-
#if H5C_COLLECT_CACHE_STATS
int i;
int64_t total_hits = 0;
@@ -4674,6 +4965,7 @@ H5C_stats(H5C_t * cache_ptr,
int64_t total_clears = 0;
int64_t total_flushes = 0;
int64_t total_evictions = 0;
+ int64_t total_take_ownerships = 0;
int64_t total_moves = 0;
int64_t total_entry_flush_moves = 0;
int64_t total_cache_flush_moves = 0;
@@ -4693,14 +4985,17 @@ H5C_stats(H5C_t * cache_ptr,
size_t aggregate_max_size = 0;
int32_t aggregate_max_pins = 0;
double hit_rate;
- double average_successful_search_depth = 0.0;
- double average_failed_search_depth = 0.0;
- double average_entries_skipped_per_calls_to_msic = 0.0;
- double average_entries_scanned_per_calls_to_msic = 0.0;
+ double average_successful_search_depth = 0.0f;
+ double average_failed_search_depth = 0.0f;
+ double average_entries_skipped_per_calls_to_msic = 0.0f;
+ double average_entries_scanned_per_calls_to_msic = 0.0f;
#endif /* H5C_COLLECT_CACHE_STATS */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+
/* This would normally be an assert, but we need to use an HGOTO_ERROR
* call to shut up the compiler.
*/
@@ -4727,6 +5022,7 @@ H5C_stats(H5C_t * cache_ptr,
total_clears += cache_ptr->clears[i];
total_flushes += cache_ptr->flushes[i];
total_evictions += cache_ptr->evictions[i];
+ total_take_ownerships += cache_ptr->take_ownerships[i];
total_moves += cache_ptr->moves[i];
total_entry_flush_moves += cache_ptr->entry_flush_moves[i];
total_cache_flush_moves += cache_ptr->cache_flush_moves[i];
@@ -4761,10 +5057,10 @@ H5C_stats(H5C_t * cache_ptr,
if ( ( total_hits > 0 ) || ( total_misses > 0 ) ) {
- hit_rate = 100.0 * ((double)(total_hits)) /
+ hit_rate = (double)100.0f * ((double)(total_hits)) /
((double)(total_hits + total_misses));
} else {
- hit_rate = 0.0;
+ hit_rate = 0.0f;
}
if ( cache_ptr->successful_ht_searches > 0 ) {
@@ -4878,11 +5174,16 @@ H5C_stats(H5C_t * cache_ptr,
(long)max_read_protects);
HDfprintf(stdout,
- "%s Total clears / flushes / evictions = %ld / %ld / %ld\n",
+ "%s Total clears / flushes = %ld / %ld\n",
cache_ptr->prefix,
(long)total_clears,
- (long)total_flushes,
- (long)total_evictions);
+ (long)total_flushes);
+
+ HDfprintf(stdout,
+ "%s Total evictions / take ownerships = %ld / %ld\n",
+ cache_ptr->prefix,
+ (long)total_evictions,
+ (long)total_take_ownerships);
HDfprintf(stdout,
"%s Total insertions(pinned) / moves = %ld(%ld) / %ld\n",
@@ -4931,7 +5232,7 @@ H5C_stats(H5C_t * cache_ptr,
HDfprintf(stdout, "%s MSIC: Average/max entries skipped = %lf / %ld\n",
cache_ptr->prefix,
- (float)average_entries_skipped_per_calls_to_msic,
+ (double)average_entries_skipped_per_calls_to_msic,
(long)(cache_ptr->max_entries_skipped_in_msic));
if (cache_ptr->calls_to_msic > 0) {
@@ -4942,7 +5243,7 @@ H5C_stats(H5C_t * cache_ptr,
HDfprintf(stdout, "%s MSIC: Average/max entries scanned = %lf / %ld\n",
cache_ptr->prefix,
- (float)average_entries_scanned_per_calls_to_msic,
+ (double)average_entries_scanned_per_calls_to_msic,
(long)(cache_ptr->max_entries_scanned_in_msic));
HDfprintf(stdout, "%s MSIC: Scanned to make space(evict) = %lld\n",
@@ -4954,6 +5255,13 @@ H5C_stats(H5C_t * cache_ptr,
(long long)(cache_ptr->total_entries_scanned_in_msic -
cache_ptr->entries_scanned_to_make_space));
+ HDfprintf(stdout,
+ "%s slist/LRU/hash bkt scan restarts = %lld / %lld / %lld.\n",
+ cache_ptr->prefix,
+ (long long)(cache_ptr->slist_scan_restarts),
+ (long long)(cache_ptr->LRU_scan_restarts),
+ (long long)(cache_ptr->hash_bucket_scan_restarts));
+
#if H5C_COLLECT_CACHE_ENTRY_STATS
HDfprintf(stdout, "%s aggregate max / min accesses = %d / %d\n",
@@ -4986,10 +5294,10 @@ H5C_stats(H5C_t * cache_ptr,
if ( ( cache_ptr->hits[i] > 0 ) || ( cache_ptr->misses[i] > 0 ) ) {
- hit_rate = 100.0 * ((double)(cache_ptr->hits[i])) /
+ hit_rate = (double)100.0f * ((double)(cache_ptr->hits[i])) /
((double)(cache_ptr->hits[i] + cache_ptr->misses[i]));
} else {
- hit_rate = 0.0;
+ hit_rate = 0.0f;
}
HDfprintf(stdout,
@@ -5007,11 +5315,16 @@ H5C_stats(H5C_t * cache_ptr,
(int)(cache_ptr->max_read_protects[i]));
HDfprintf(stdout,
- "%s clears / flushes / evictions = %ld / %ld / %ld\n",
+ "%s clears / flushes = %ld / %ld\n",
cache_ptr->prefix,
(long)(cache_ptr->clears[i]),
- (long)(cache_ptr->flushes[i]),
- (long)(cache_ptr->evictions[i]));
+ (long)(cache_ptr->flushes[i]));
+
+ HDfprintf(stdout,
+ "%s evictions / take ownerships = %ld / %ld\n",
+ cache_ptr->prefix,
+ (long)(cache_ptr->evictions[i]),
+ (long)(cache_ptr->take_ownerships[i]));
HDfprintf(stdout,
"%s insertions(pinned) / moves = %ld(%ld) / %ld\n",
@@ -5106,6 +5419,11 @@ done:
* total_entries_skipped_in_msic, total_entries_scanned_in_msic,
* and max_entries_skipped_in_msic fields.
*
+ * JRM 4/11/15
+ * Added code to initialize the new slist_scan_restarts,
+ * LRU_scan_restarts, hash_bucket_scan_restarts, and
+ * take_ownerships fields.
+ *
*-------------------------------------------------------------------------
*/
void
@@ -5139,6 +5457,7 @@ H5C_stats__reset(H5C_t H5_ATTR_UNUSED * cache_ptr)
cache_ptr->clears[i] = 0;
cache_ptr->flushes[i] = 0;
cache_ptr->evictions[i] = 0;
+ cache_ptr->take_ownerships[i] = 0;
cache_ptr->moves[i] = 0;
cache_ptr->entry_flush_moves[i] = 0;
cache_ptr->cache_flush_moves[i] = 0;
@@ -5181,6 +5500,10 @@ H5C_stats__reset(H5C_t H5_ATTR_UNUSED * cache_ptr)
cache_ptr->max_entries_scanned_in_msic = 0;
cache_ptr->entries_scanned_to_make_space = 0;
+ cache_ptr->slist_scan_restarts = 0;
+ cache_ptr->LRU_scan_restarts = 0;
+ cache_ptr->hash_bucket_scan_restarts = 0;
+
#if H5C_COLLECT_CACHE_ENTRY_STATS
for ( i = 0; i <= cache_ptr->max_type_id; i++ )
@@ -5333,6 +5656,107 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5C_dump_cache_skip_list
+ *
+ * Purpose: Debugging routine that prints a summary of the contents of
+ *		the skip list used by the metadata cache to maintain an
+ *		address-sorted list of dirty entries.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 11/15/14
+ *
+ *-------------------------------------------------------------------------
+ */
+#if 0 /* debugging routine */
+herr_t
+H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+ int i;
+ H5C_cache_entry_t * entry_ptr = NULL;
+ H5SL_node_t * node_ptr = NULL;
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(calling_fcn != NULL);
+
+ HDfprintf(stdout, "\n\nDumping metadata cache skip list from %s.\n",
+ calling_fcn);
+ HDfprintf(stdout, " slist len = %d.\n", cache_ptr->slist_len);
+ HDfprintf(stdout, " slist size = %lld.\n",
+ (long long)(cache_ptr->slist_size));
+
+ if ( cache_ptr->slist_len > 0 )
+ {
+ /* If we get this far, all entries in the cache are listed in the
+ * skip list -- scan the skip list generating the desired output.
+ */
+
+ HDfprintf(stdout,
+ "Num: Addr: Len: Prot/Pind: Dirty: Type:\n");
+
+ i = 0;
+
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
+
+ if ( node_ptr != NULL ) {
+
+ entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+
+ } else {
+
+ entry_ptr = NULL;
+ }
+
+ while ( entry_ptr != NULL ) {
+
+ HDassert( entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
+
+ HDfprintf(stdout,
+ "%s%d 0x%016llx %4lld %d/%d %d %s\n",
+ cache_ptr->prefix, i,
+ (long long)(entry_ptr->addr),
+ (long long)(entry_ptr->size),
+ (int)(entry_ptr->is_protected),
+ (int)(entry_ptr->is_pinned),
+ (int)(entry_ptr->is_dirty),
+ entry_ptr->type->name);
+
+ HDfprintf(stdout, " node_ptr = 0x%llx, item = 0x%llx\n",
+ (unsigned long long)node_ptr,
+ (unsigned long long)H5SL_item(node_ptr));
+
+            /* advance node_ptr to the next node in the skip list */
+ node_ptr = H5SL_next(node_ptr);
+
+ if ( node_ptr != NULL ) {
+
+ entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+
+ } else {
+
+ entry_ptr = NULL;
+ }
+
+ i++;
+ }
+ }
+
+ HDfprintf(stdout, "\n\n");
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_dump_cache_skip_list() */
+#endif /* debugging routine */
+
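The routine above is compiled out by the #if 0 guard. A hypothetical call site, assuming the guard is removed and a prototype is made visible to the caller (the calling-function string is arbitrary and only used as an output prefix):

    if ( H5C_dump_cache_skip_list(cache_ptr, "H5C_flush_cache") < 0 )
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't dump skip list")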
+
+/*-------------------------------------------------------------------------
* Function: H5C_unpin_entry_from_client()
*
* Purpose: Internal routine to unpin a cache entry from a client action.
@@ -5488,8 +5912,7 @@ done:
*/
herr_t
H5C_unprotect(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
+ hid_t dxpl_id,
const H5C_class_t * type,
haddr_t addr,
void * thing,
@@ -5529,8 +5952,6 @@ H5C_unprotect(H5F_t * f,
HDassert( cache_ptr );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
HDassert( type );
- HDassert( type->clear );
- HDassert( type->flush );
HDassert( H5F_addr_defined(addr) );
HDassert( thing );
HDassert( ! ( pin_entry && unpin_entry ) );
@@ -5629,6 +6050,19 @@ H5C_unprotect(H5F_t * f,
/* Mark the entry as dirty if appropriate */
entry_ptr->is_dirty = (entry_ptr->is_dirty || dirtied);
+ /* the image_up_to_date field was introduced to support
+ * journaling. Until we re-introduce journaling, this
+ * field should be equal to !entry_ptr->is_dirty.
+ *
+ * When journaling is re-enabled it should be set
+ * to FALSE if dirtied is TRUE.
+ */
+#if 1 /* JRM */
+ entry_ptr->image_up_to_date = FALSE;
+#else /* JRM */
+ entry_ptr->image_up_to_date = !entry_ptr->is_dirty;
+#endif /* JRM */
+
/* Update index for newly dirtied entry */
if(was_clean && entry_ptr->is_dirty)
H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr)
@@ -5670,13 +6104,6 @@ H5C_unprotect(H5F_t * f,
* JRM - 5/19/04
*/
if ( deleted ) {
-
- /* the following first flush flag will never be used as we are
- * calling H5C_flush_single_entry with both the
- * H5C__FLUSH_CLEAR_ONLY_FLAG and H5C__FLUSH_INVALIDATE_FLAG flags.
- * However, it is needed for the function call.
- */
- hbool_t dummy_first_flush = TRUE;
unsigned flush_flags = (H5C__FLUSH_CLEAR_ONLY_FLAG |
H5C__FLUSH_INVALIDATE_FLAG);
@@ -5698,36 +6125,40 @@ H5C_unprotect(H5F_t * f,
"hash table contains multiple entries for addr?!?.")
}
- /* Pass along 'free file space' flag to cache client */
-
- entry_ptr->free_file_space_on_destroy = free_file_space;
+ /* Set the 'free file space' flag for the flush, if needed */
+ if(free_file_space)
+ flush_flags |= H5C__FREE_FILE_SPACE_FLAG;
/* Set the "take ownership" flag for the flush, if needed */
if(take_ownership)
flush_flags |= H5C__TAKE_OWNERSHIP_FLAG;
if ( H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- type,
+ dxpl_id,
addr,
flush_flags,
- &dummy_first_flush,
- TRUE) < 0 ) {
+ TRUE,
+ NULL) < 0 ) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't flush.")
}
+
+#if H5C_DO_SANITY_CHECKS
+ if ( ( take_ownership ) && ( ! was_clean ) )
+ {
+ /* we have just removed an entry from the skip list. Thus
+ * we must touch up cache_ptr->slist_len_increase and
+ * cache_ptr->slist_size_increase to keep from skewing
+ * the sanity checks on flushes.
+ */
+ cache_ptr->slist_len_increase -= 1;
+ cache_ptr->slist_size_increase -= (int64_t)(entry_ptr->size);
+ }
+#endif /* H5C_DO_SANITY_CHECKS */
}
#ifdef H5_HAVE_PARALLEL
else if ( clear_entry ) {
- /* the following first flush flag will never be used as we are
- * calling H5C_flush_single_entry with the
- * H5C__FLUSH_CLEAR_ONLY_FLAG flag. However, it is needed for
- * the function call.
- */
- hbool_t dummy_first_flush = TRUE;
-
/* verify that the target entry is in the cache. */
H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
@@ -5744,13 +6175,11 @@ H5C_unprotect(H5F_t * f,
}
if ( H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- type,
+ dxpl_id,
addr,
H5C__FLUSH_CLEAR_ONLY_FLAG,
- &dummy_first_flush,
- TRUE) < 0 ) {
+ TRUE,
+ NULL) < 0 ) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't clear.")
}
@@ -6406,10 +6835,8 @@ done:
*/
static herr_t
H5C__auto_adjust_cache_size(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- hbool_t write_permitted,
- hbool_t * first_flush_ptr)
+ hid_t dxpl_id,
+ hbool_t write_permitted)
{
H5C_t * cache_ptr = f->shared->cache;
herr_t result;
@@ -6610,13 +7037,11 @@ H5C__auto_adjust_cache_size(H5F_t * f,
} else {
result = H5C__autoadjust__ageout(f,
+ dxpl_id,
hit_rate,
&status,
&new_max_cache_size,
- primary_dxpl_id,
- secondary_dxpl_id,
- write_permitted,
- first_flush_ptr);
+ write_permitted);
if ( result != SUCCEED ) {
@@ -6759,13 +7184,11 @@ done:
*/
static herr_t
H5C__autoadjust__ageout(H5F_t * f,
+ hid_t dxpl_id,
double hit_rate,
enum H5C_resize_status * status_ptr,
size_t * new_max_cache_size_ptr,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- hbool_t write_permitted,
- hbool_t * first_flush_ptr)
+ hbool_t write_permitted)
{
H5C_t * cache_ptr = f->shared->cache;
herr_t result;
@@ -6806,8 +7229,7 @@ H5C__autoadjust__ageout(H5F_t * f,
if ( cache_ptr->max_cache_size > (cache_ptr->resize_ctl).min_size ){
/* evict aged out cache entries if appropriate... */
- if(H5C__autoadjust__ageout__evict_aged_out_entries(f, primary_dxpl_id,
- secondary_dxpl_id, write_permitted, first_flush_ptr) < 0)
+ if(H5C__autoadjust__ageout__evict_aged_out_entries(f, dxpl_id, write_permitted) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error flushing aged out entries.")
/* ... and then reduce cache size if appropriate */
@@ -7004,20 +7426,32 @@ done:
*
* Programmer: John Mainzer, 11/22/04
*
+ * Changes: Modified function to detect deletions of entries
+ * during a scan of the LRU, and where appropriate,
+ * restart the scan to avoid proceeding with a next
+ * entry that is no longer in the cache.
+ *
+ * Note the absence of checks after flushes of clean
+ * entries. As a second entry can only be removed by
+ *		a call to the pre_serialize or serialize callback
+ * of the first, and as these callbacks will not be called
+ * on clean entries, no checks are needed.
+ *
+ * JRM -- 4/6/15
+ *
*-------------------------------------------------------------------------
*/
static herr_t
H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- hbool_t write_permitted,
- hbool_t * first_flush_ptr)
+ hid_t dxpl_id,
+ hbool_t write_permitted)
{
H5C_t * cache_ptr = f->shared->cache;
herr_t result;
size_t eviction_size_limit;
size_t bytes_evicted = 0;
hbool_t prev_is_dirty = FALSE;
+ hbool_t restart_scan;
H5C_cache_entry_t * entry_ptr;
H5C_cache_entry_t * next_ptr;
H5C_cache_entry_t * prev_ptr;
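The change note near the top of this function describes restarting the LRU scan whenever a flush removes entries out from under it, which is what the restart_scan logic further down implements. A minimal standalone sketch of the same idiom over a hypothetical doubly linked list (plain C, illustrative names only, not the HDF5 LRU):

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical doubly linked list -- illustrative only, not the HDF5 LRU. */
    struct node {
        bool         dirty;
        struct node *prev;
        struct node *next;
    };

    struct list {
        struct node *tail;
        int          entries_removed;  /* removals since the last reset */
        struct node *last_removed;
    };

    /* Stand-in for the flush: here it only unlinks the flushed node, but a
     * real serialize callback could remove other nodes too, which is what
     * the restart logic below guards against.
     */
    static void flush_node(struct list *l, struct node *n)
    {
        if (n->prev != NULL) n->prev->next = n->next;
        if (n->next != NULL) n->next->prev = n->prev;
        if (l->tail == n)    l->tail = n->prev;
        l->entries_removed++;
        l->last_removed = n;
    }

    /* Scan from the tail, restarting whenever the flush may have
     * invalidated the node we intended to visit next.
     */
    static void scan_from_tail(struct list *l)
    {
        bool         restart_scan = false;
        struct node *n = l->tail;

        while (n != NULL) {
            struct node *prev = n->prev;

            if (n->dirty) {
                l->entries_removed = 0;     /* reset removal tracking */
                l->last_removed    = NULL;

                flush_node(l, n);

                /* more than one removal, or removal of the planned next
                 * victim, means prev can no longer be trusted
                 */
                if (l->entries_removed > 1 || l->last_removed == prev)
                    restart_scan = true;
            }

            if (restart_scan) {
                restart_scan = false;
                n = l->tail;                /* start over from the tail */
            } else {
                n = prev;
            }
        }
    }

    int main(void)
    {
        struct node a = { true,  NULL, NULL };
        struct node b = { false, &a,   NULL };
        struct list l = { &b, 0, NULL };

        a.next = &b;
        scan_from_tail(&l);
        return 0;
    }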
@@ -7046,13 +7480,17 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
if ( write_permitted ) {
+ restart_scan = FALSE;
entry_ptr = cache_ptr->LRU_tail_ptr;
while ( ( entry_ptr != NULL ) &&
( (entry_ptr->type)->id != H5C__EPOCH_MARKER_TYPE ) &&
( bytes_evicted < eviction_size_limit ) )
{
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert( ! (entry_ptr->is_protected) );
+ HDassert( ! (entry_ptr->is_read_only) );
+ HDassert( (entry_ptr->ro_ref_count) == 0 );
next_ptr = entry_ptr->next;
prev_ptr = entry_ptr->prev;
@@ -7064,26 +7502,39 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
if ( entry_ptr->is_dirty ) {
+ /* reset entries_removed_counter and
+ * last_entry_removed_ptr prior to the call to
+ * H5C_flush_single_entry() so that we can spot
+ * unexpected removals of entries from the cache,
+ * and set the restart_scan flag if proceeding
+ * would be likely to cause us to scan an entry
+ * that is no longer in the cache.
+ */
+ cache_ptr->entries_removed_counter = 0;
+ cache_ptr->last_entry_removed_ptr = NULL;
+
result = H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- entry_ptr->type,
+ dxpl_id,
entry_ptr->addr,
H5C__NO_FLAGS_SET,
- first_flush_ptr,
- FALSE);
+ FALSE,
+ NULL);
+
+ if ( ( cache_ptr->entries_removed_counter > 1 ) ||
+ ( cache_ptr->last_entry_removed_ptr == prev_ptr ) )
+
+ restart_scan = TRUE;
+
} else {
bytes_evicted += entry_ptr->size;
result = H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- entry_ptr->type,
+ dxpl_id,
entry_ptr->addr,
H5C__FLUSH_INVALIDATE_FLAG,
- first_flush_ptr,
- TRUE);
+ TRUE,
+ NULL);
}
if ( result < 0 ) {
@@ -7093,30 +7544,25 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
}
if ( prev_ptr != NULL ) {
-#ifndef NDEBUG
- if ( prev_ptr->magic != H5C__H5C_CACHE_ENTRY_T_MAGIC ) {
-
- /* something horrible has happened to *prev_ptr --
- * scream and die.
- */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "*prev_ptr corrupt")
- } else
-#endif /* NDEBUG */
- if ( ( prev_ptr->is_dirty != prev_is_dirty )
- ||
- ( prev_ptr->next != next_ptr )
- ||
- ( prev_ptr->is_protected )
- ||
- ( prev_ptr->is_pinned ) ) {
+ if ( ( restart_scan )
+ ||
+ ( prev_ptr->is_dirty != prev_is_dirty )
+ ||
+ ( prev_ptr->next != next_ptr )
+ ||
+ ( prev_ptr->is_protected )
+ ||
+ ( prev_ptr->is_pinned ) ) {
/* something has happened to the LRU -- start over
* from the tail.
*/
+ restart_scan = FALSE;
entry_ptr = cache_ptr->LRU_tail_ptr;
+ H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
+
} else {
entry_ptr = prev_ptr;
@@ -7175,13 +7621,11 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
if ( ! (entry_ptr->is_dirty) ) {
result = H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- entry_ptr->type,
+ dxpl_id,
entry_ptr->addr,
H5C__FLUSH_INVALIDATE_FLAG,
- first_flush_ptr,
- TRUE);
+ TRUE,
+ NULL);
if ( result < 0 ) {
@@ -7191,6 +7635,10 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
}
/* just skip the entry if it is dirty, as we can't do
* anything with it now since we can't write.
+ *
+ * Since all entries are clean, serialize() will not be called,
+ * and thus we needn't test to see if the LRU has been changed
+ * out from under us.
*/
entry_ptr = prev_ptr;
@@ -7654,17 +8102,45 @@ done:
* Programmer: John Mainzer
* 3/24/065
*
+ * Changes:	Modified function to test for slist changes in
+ * pre_serialize and serialize callbacks, and re-start
+ * scans through the slist when such changes occur.
+ *
+ * This has been a potential problem for some time,
+ * and there has been code in this function to deal
+ *		with elements of this issue.  However, the shift
+ *		to the V3 cache, in combination with the activities
+ *		of some of the cache clients (in particular the
+ *		free space manager and the fractal heap), has
+ *		made this re-work necessary in H5C_flush_cache.
+ *
+ * At present, this issue doesn't seem to be causing problems
+ * in H5C_flush_invalidate_cache(). However, it seems
+ * prudent to port the H5C_flush_cache changes to this
+ * function as well.
+ *
+ * JRM -- 12/14/14
+ *
+ * Added code to track entry size change during flush single
+ *		entry.  This was not previously a problem, as the entry
+ * was largely removed from the cache data structures before
+ * the flush proper. However, re-entrant calls to the cache
+ * in the parallel case required a re-factoring of the
+ * H5C_flush_single_entry() function to keep entries fully
+ * in the cache until after the pre-serialize and serialize
+ * calls.
+ * JRM -- 12/25/14
+ *
*-------------------------------------------------------------------------
*/
static herr_t
-H5C_flush_invalidate_cache(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
+H5C_flush_invalidate_cache(const H5F_t * f,
+ hid_t dxpl_id,
unsigned flags)
{
H5C_t * cache_ptr = f->shared->cache;
herr_t status;
- hbool_t first_flush = TRUE;
+ hbool_t restart_slist_scan;
int32_t protected_entries = 0;
int32_t i;
int32_t cur_pel_len;
@@ -7675,10 +8151,14 @@ H5C_flush_invalidate_cache(H5F_t * f,
H5C_cache_entry_t * entry_ptr = NULL;
H5C_cache_entry_t * next_entry_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
- int64_t actual_slist_len = 0;
+ int64_t flushed_slist_len = 0;
int64_t initial_slist_len = 0;
- size_t actual_slist_size = 0;
+ int64_t flushed_slist_size = 0;
size_t initial_slist_size = 0;
+ int64_t entry_size_change;
+ int64_t * entry_size_change_ptr = &entry_size_change;
+#else /* H5C_DO_SANITY_CHECKS */
+ int64_t * entry_size_change_ptr = NULL;
#endif /* H5C_DO_SANITY_CHECKS */
herr_t ret_value = SUCCEED;
@@ -7757,25 +8237,6 @@ H5C_flush_invalidate_cache(H5F_t * f,
* may be created by the flush call backs. Thus it is possible
* that the slist will not be empty after we finish the scan.
*/
- if ( cache_ptr->slist_len == 0 ) {
-
- node_ptr = NULL;
- HDassert( cache_ptr->slist_size == 0 );
-
- } else {
-
- /* Start at beginning of skip list each time */
- node_ptr = H5SL_first(cache_ptr->slist_ptr);
- HDassert( node_ptr != NULL );
-
- /* Get cache entry for this node */
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if ( NULL == next_entry_ptr )
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
- HDassert( next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
- HDassert( next_entry_ptr->is_dirty );
- HDassert( next_entry_ptr->in_slist );
- }
#if H5C_DO_SANITY_CHECKS
/* Depending on circumstances, H5C_flush_single_entry() will
@@ -7799,63 +8260,80 @@ H5C_flush_invalidate_cache(H5F_t * f,
cache_ptr->slist_len_increase = 0;
cache_ptr->slist_size_increase = 0;
- /* Finally, reset the actual_slist_len and actual_slist_size
+ /* Finally, reset the flushed_slist_len and flushed_slist_size
* fields to zero, as these fields are used to accumulate
     * the slist length and size that we see as we scan through
* the slist.
*/
- actual_slist_len = 0;
- actual_slist_size = 0;
+ flushed_slist_len = 0;
+ flushed_slist_size = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- while ( node_ptr != NULL )
+ /* set the cache_ptr->slist_change_in_pre_serialize and
+ * cache_ptr->slist_change_in_serialize to false.
+ *
+ * These flags are set to TRUE by H5C_flush_single_entry if the
+ * slist is modified by a pre_serialize or serialize call
+ * respectively.
+ *
+ * H5C_flush_invalidate_cache() uses these flags to detect any
+ * modifications to the slist that might corrupt the scan of
+ * the slist -- and restart the scan in this event.
+ */
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
+
+ /* this done, start the scan of the slist */
+
+ restart_slist_scan = TRUE;
+
+ while ( ( restart_slist_scan ) || ( node_ptr != NULL ) )
{
- entry_ptr = next_entry_ptr;
+ if ( restart_slist_scan )
+ {
+ restart_slist_scan = FALSE;
- /* With the advent of the fractal heap, it is possible
- * that the flush callback will dirty and/or resize
- * other entries in the cache. In particular, while
- * Quincey has promised me that this will never happen,
- * it is possible that the flush callback for an
- * entry may protect an entry that is not in the cache,
- * perhaps causing the cache to flush and possibly
- * evict the entry associated with node_ptr to make
- * space for the new entry.
- *
- * Thus we do a bit of extra sanity checking on entry_ptr,
- * and break out of this scan of the skip list if we
- * detect major problems. We have a bit of leaway on the
- * number of passes though the skip list, so this shouldn't
- * be an issue in the flush in and of itself, as it should
- * be all but impossible for this to happen more than once
- * in any flush.
- *
- * Observe that that breaking out of the scan early
- * shouldn't break the sanity checks just after the end
- * of this while loop.
- *
- * If an entry has merely been marked clean and removed from
- * the s-list, we simply break out of the scan.
- *
- * If the entry has been evicted, we flag an error and
- * exit.
- */
-#ifndef NDEBUG
- if ( entry_ptr->magic != H5C__H5C_CACHE_ENTRY_T_MAGIC ) {
+ /* Start at beginning of skip list */
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry_ptr->magic is invalid ?!?!")
+ if ( node_ptr == NULL )
+ {
+ /* the slist is empty -- break out of inner loop */
+ break;
+ }
+ HDassert( node_ptr != NULL );
- } else
-#endif /* NDEBUG */
- if ( ( ! entry_ptr->is_dirty ) ||
- ( ! entry_ptr->in_slist ) ) {
+ /* Get cache entry for this node */
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- /* the s-list has been modified out from under us.
- * break out of the loop.
- */
- goto end_of_inner_loop;;
+ if(NULL == next_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "next_entry_ptr == NULL ?!?!")
+
+ HDassert( next_entry_ptr->magic == \
+ H5C__H5C_CACHE_ENTRY_T_MAGIC );
+ HDassert( next_entry_ptr->is_dirty );
+ HDassert( next_entry_ptr->in_slist );
}
+ entry_ptr = next_entry_ptr;
+
+ /* It is possible that entries will be dirtied, resized,
+ * flushed, or removed from the cache via the take ownership
+         * flag as the result of pre_serialize or serialize callbacks.
+ *
+ * This in turn can corrupt the scan through the slist.
+ *
+ * We test for slist modifications in the pre_serialize
+ * and serialize callbacks, and restart the scan of the
+         * slist if we find them.  However, it is best to do some extra
+ * sanity checking just in case.
+ */
+
+ HDassert( entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
+ HDassert( entry_ptr->in_slist );
+ HDassert( entry_ptr->is_dirty );
+
/* increment node pointer now, before we delete its target
* from the slist.
*/
@@ -7868,6 +8346,7 @@ H5C_flush_invalidate_cache(H5F_t * f,
HDassert( next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
HDassert( next_entry_ptr->is_dirty );
HDassert( next_entry_ptr->in_slist );
+ HDassert( entry_ptr != next_entry_ptr );
} else {
next_entry_ptr = NULL;
}
@@ -7891,21 +8370,6 @@ H5C_flush_invalidate_cache(H5F_t * f,
( cache_ptr->num_last_entries >=
cache_ptr->slist_len ) ) ) {
-#if H5C_DO_SANITY_CHECKS
- /* update actual_slist_len & actual_slist_size before
- * the flush. Note that the entry will be removed
- * from the slist after the flush, and thus may be
- * resized by the flush callback. This is OK, as
- * we will catch the size delta in
- * cache_ptr->slist_size_increase.
- *
- * Note that we include pinned entries in this count, even
- * though we will not actually flush them.
- */
- actual_slist_len++;
- actual_slist_size += entry_ptr->size;
-#endif /* H5C_DO_SANITY_CHECKS */
-
if ( entry_ptr->is_protected ) {
/* we have major problems -- but lets flush
@@ -7921,14 +8385,26 @@ H5C_flush_invalidate_cache(H5F_t * f,
* as pinned entries can't be evicted.
*/
if(entry_ptr->flush_dep_height == curr_flush_dep_height ) {
+#if H5C_DO_SANITY_CHECKS
+ /* update flushed_slist_len & flushed_slist_size
+ * before the flush. Note that the entry will
+ * be removed from the slist after the flush,
+ * and thus may be resized by the flush callback.
+ * This is OK, as we will catch the size delta in
+ * cache_ptr->slist_size_increase.
+ *
+ */
+ flushed_slist_len++;
+ flushed_slist_size += (int64_t)entry_ptr->size;
+ entry_size_change = 0;
+#endif /* H5C_DO_SANITY_CHECKS */
+
status = H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- NULL,
+ dxpl_id,
entry_ptr->addr,
H5C__NO_FLAGS_SET,
- &first_flush,
- FALSE);
+ FALSE,
+ entry_size_change_ptr);
if ( status < 0 ) {
/* This shouldn't happen -- if it does, we
@@ -7938,7 +8414,32 @@ H5C_flush_invalidate_cache(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
"dirty pinned entry flush failed.")
} /* end if */
+
+#if H5C_DO_SANITY_CHECKS
+ /* entry size may have changed during the flush.
+ * Update flushed_slist_size to account for this.
+ */
+ flushed_slist_size += entry_size_change;
+#endif /* H5C_DO_SANITY_CHECKS */
+
flushed_during_dep_loop = TRUE;
+
+ if ( ( cache_ptr->slist_change_in_serialize ) ||
+ ( cache_ptr->slist_change_in_pre_serialize ) )
+ {
+ /* The slist has been modified by something
+                         * other than the simple removal of the
+                         * flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_change_in_pre_serialize
+ = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr);
+ }
} /* end if */
else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
/* This shouldn't happen -- if it does, just scream and die. */
@@ -7946,15 +8447,26 @@ H5C_flush_invalidate_cache(H5F_t * f,
} /* end if */
else {
if(entry_ptr->flush_dep_height == curr_flush_dep_height ){
+#if H5C_DO_SANITY_CHECKS
+ /* update flushed_slist_len & flushed_slist_size
+ * before the flush. Note that the entry will
+ * be removed from the slist after the flush,
+ * and thus may be resized by the flush callback.
+ * This is OK, as we will catch the size delta in
+ * cache_ptr->slist_size_increase.
+ *
+ */
+ flushed_slist_len++;
+ flushed_slist_size += (int64_t)entry_ptr->size;
+ entry_size_change = 0;
+#endif /* H5C_DO_SANITY_CHECKS */
status = H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- NULL,
- entry_ptr->addr,
- (cooked_flags | H5C__FLUSH_INVALIDATE_FLAG),
- &first_flush,
- TRUE);
+ dxpl_id,
+ entry_ptr->addr,
+ (cooked_flags | H5C__FLUSH_INVALIDATE_FLAG),
+ TRUE,
+ entry_size_change_ptr);
if ( status < 0 ) {
/* This shouldn't happen -- if it does, we
@@ -7964,7 +8476,32 @@ H5C_flush_invalidate_cache(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
"dirty entry flush destroy failed.")
} /* end if */
+
+#if H5C_DO_SANITY_CHECKS
+ /* entry size may have changed during the flush.
+ * Update flushed_slist_size to account for this.
+ */
+ flushed_slist_size += entry_size_change;
+#endif /* H5C_DO_SANITY_CHECKS */
+
flushed_during_dep_loop = TRUE;
+
+ if ((cache_ptr->slist_change_in_serialize) ||
+ (cache_ptr->slist_change_in_pre_serialize))
+ {
+ /* The slist has been modified by something
+                         * other than the simple removal of the
+                         * flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_change_in_pre_serialize
+ = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
+ }
} /* end if */
else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
/* This shouldn't happen -- if it does, just scream and die. */
@@ -7985,11 +8522,10 @@ H5C_flush_invalidate_cache(H5F_t * f,
if ( node_ptr == NULL ) {
- HDassert( (actual_slist_len + cache_ptr->slist_len) ==
+ HDassert( (flushed_slist_len + cache_ptr->slist_len) ==
(initial_slist_len + cache_ptr->slist_len_increase) );
- HDassert( (actual_slist_size + cache_ptr->slist_size) ==
- (initial_slist_size +
- (size_t)(cache_ptr->slist_size_increase)) );
+ HDassert( (flushed_slist_size + (int64_t)cache_ptr->slist_size) ==
+ ((int64_t)initial_slist_size + cache_ptr->slist_size_increase) );
}
#endif /* H5C_DO_SANITY_CHECKS */
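The sanity check above asserts a simple conservation law: everything that was on the slist initially, plus whatever the callbacks added during the scan, must either have been flushed or still be on the slist. A tiny standalone illustration of that bookkeeping with plain integers (hypothetical values, not the slist fields themselves):

    #include <assert.h>

    int main(void)
    {
        /* Hypothetical bookkeeping mirroring the slist sanity checks above:
         * whatever was present initially or added during the scan must
         * either have been flushed or still be present at the end.
         */
        long initial_len  = 5;  /* entries present when the scan starts       */
        long len_increase = 0;  /* entries added by callbacks during the scan */
        long flushed_len  = 0;  /* entries this scan has flushed              */
        long current_len  = initial_len;

        flushed_len += 3;       /* flush three entries ...                    */
        current_len -= 3;

        len_increase += 1;      /* ... and a callback dirties one more entry  */
        current_len  += 1;

        assert(flushed_len + current_len == initial_len + len_increase);
        return 0;
    }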
@@ -8011,9 +8547,7 @@ H5C_flush_invalidate_cache(H5F_t * f,
while ( next_entry_ptr != NULL )
{
entry_ptr = next_entry_ptr;
-#ifndef NDEBUG
HDassert( entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
-#endif /* NDEBUG */
next_entry_ptr = entry_ptr->ht_next;
HDassert ( ( next_entry_ptr == NULL ) ||
@@ -8042,14 +8576,40 @@ H5C_flush_invalidate_cache(H5F_t * f,
* If we can, go ahead and flush.
*/
if(entry_ptr->flush_dep_height == curr_flush_dep_height ){
+ /* if *entry_ptr is dirty, it is possible
+ * that one or more other entries may be
+ * either removed from the cache, loaded
+ * into the cache, or moved to a new location
+ * in the file as a side effect of the flush.
+ *
+                        * If this happens, and one of the target
+                        * entries happens to be the next entry in
+                        * the hash bucket, we could find ourselves
+                        * scanning a non-existent entry, scanning
+                        * through a different bucket, or skipping
+                        * an entry.
+                        *
+                        * None of these is good, so restart the
+                        * scan at the head of the hash bucket
+                        * after the flush if *entry_ptr was dirty,
+                        * on the off chance that the next entry was
+                        * a target.
+                        *
+                        * This is not as inefficient as it might seem,
+                        * as hash buckets typically have at most two
+                        * or three entries.
+ */
+ hbool_t entry_was_dirty;
+
+ entry_was_dirty = entry_ptr->is_dirty;
+
status = H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- NULL,
- entry_ptr->addr,
- (cooked_flags | H5C__FLUSH_INVALIDATE_FLAG),
- &first_flush,
- TRUE);
+ dxpl_id,
+ entry_ptr->addr,
+ (cooked_flags | H5C__FLUSH_INVALIDATE_FLAG),
+ TRUE,
+ NULL);
+
if ( status < 0 ) {
/* This shouldn't happen -- if it does,
@@ -8059,6 +8619,17 @@ H5C_flush_invalidate_cache(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
"Entry flush destroy failed.")
}
+
+ if ( entry_was_dirty ) {
+
+                            /* restart the scan at the head of the hash
+                             * bucket, and update the scan restart stats.
+                             *                              -- JRM
+                             */
+ next_entry_ptr = cache_ptr->index[i];
+ H5C__UPDATE_STATS_FOR_HASH_BUCKET_SCAN_RESTART(cache_ptr)
+ }
+
flushed_during_dep_loop = TRUE;
} /* end if */
else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
@@ -8074,7 +8645,7 @@ H5C_flush_invalidate_cache(H5F_t * f,
* of pinned entries from pass to pass. If it stops
* shrinking before it hits zero, we scream and die.
*/
- /* if the flush function on the entry we last evicted
+ /* if the serialize function on the entry we last evicted
* loaded an entry into cache (as Quincey has promised me
* it never will), and if the cache was full, it is
* possible that *next_entry_ptr was flushed or evicted.
@@ -8083,8 +8654,13 @@ H5C_flush_invalidate_cache(H5F_t * f,
             * test is triggered, we are accessing a deallocated piece
* of dynamically allocated memory, so we just scream and
* die.
+ *
+ * Update: The code to restart the scan after flushes
+ * of dirty entries should make it impossible
+ * to satisfy the following test. Leave it in
+ * in case I am wrong.
+ * -- JRM
*/
-#ifndef NDEBUG
if ( ( next_entry_ptr != NULL ) &&
( next_entry_ptr->magic !=
H5C__H5C_CACHE_ENTRY_T_MAGIC ) ) {
@@ -8095,7 +8671,6 @@ H5C_flush_invalidate_cache(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"next_entry_ptr->magic is invalid?!?!?.")
}
-#endif /* NDEBUG */
} /* end while loop scanning hash table bin */
} /* end for loop scanning hash table */
@@ -8113,7 +8688,6 @@ H5C_flush_invalidate_cache(H5F_t * f,
curr_flush_dep_height++;
} /* end while loop over flush dependency heights */
-end_of_inner_loop:
old_pel_len = cur_pel_len;
cur_pel_len = cache_ptr->pel_len;
@@ -8191,22 +8765,9 @@ done:
* Attempts to flush a protected entry will result in an
* error.
*
- * *first_flush_ptr should be true if only one
- * flush is contemplated before the next load, or if this
- * is the first of a sequence of flushes that will be
- * completed before the next load. *first_flush_ptr is set
- * to false if a flush actually takes place, and should be
- * left false until the end of the sequence.
- *
- * The primary_dxpl_id is used if *first_flush_ptr is TRUE
- * on entry, and a flush actually takes place. The
- * secondary_dxpl_id is used in any subsequent flush where
- * *first_flush_ptr is FALSE on entry.
- *
* If the H5C__FLUSH_INVALIDATE_FLAG flag is set, the entry will
- * be cleared and not flushed -- in the case *first_flush_ptr,
- * primary_dxpl_id, and secondary_dxpl_id are all irrelevent,
- * and the call can't be part of a sequence of flushes.
+ * be cleared and not flushed, and the call can't be part of a
+ * sequence of flushes.
*
* If the caller knows the address of the skip list node at
* which the target entry resides, it can avoid a lookup
@@ -8223,27 +8784,57 @@ done:
*
* Programmer: John Mainzer, 5/5/04
*
+ * Changes: Refactored function to remove the type_ptr parameter.
+ *
+ * JRM -- 8/7/14
+ *
+ * Added code to check for slist changes in pre_serialize and
+ * serialize calls, and set
+ * cache_ptr->slist_change_in_pre_serialize and
+ * cache_ptr->slist_change_in_serialize as appropriate.
+ *
+ * JRM -- 12/13/14
+ *
+ * Refactored function to delay all modifications of the
+ * metadata cache data structures until after any calls
+ * to the pre-serialize or serialize callbacks.
+ *
+ * Need to do this, as some pre-serialize or serialize
+ * calls result in calls to the metadata cache and
+ * modifications to its data structures. Thus, at the
+ * time of any such call, the target entry flags and
+ *		the metadata cache must all be consistent.
+ *
+ * Also added the entry_size_change_ptr parameter, which
+ * allows the function to report back any change in the size
+ * of the entry during the flush. Such size changes may
+ * occur during the pre-serialize callback.
+ *
+ * JRM -- 12/24/14
+ *
*-------------------------------------------------------------------------
*/
static herr_t
-H5C_flush_single_entry(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- const H5C_class_t * type_ptr,
+H5C_flush_single_entry(const H5F_t * f,
+ hid_t dxpl_id,
haddr_t addr,
unsigned flags,
- hbool_t * first_flush_ptr,
- hbool_t del_entry_from_slist_on_destroy)
+ hbool_t del_entry_from_slist_on_destroy,
+ int64_t * entry_size_change_ptr)
{
H5C_t * cache_ptr = f->shared->cache;
hbool_t destroy; /* external flag */
hbool_t clear_only; /* external flag */
+ hbool_t free_file_space; /* external flag */
hbool_t take_ownership; /* external flag */
+ hbool_t write_entry; /* internal flag */
+ hbool_t destroy_entry; /* internal flag */
hbool_t was_dirty;
- hbool_t destroy_entry;
herr_t status;
- int type_id;
- unsigned flush_flags = H5C_CALLBACK__NO_FLAGS_SET;
+ haddr_t new_addr = HADDR_UNDEF;
+ haddr_t old_addr = HADDR_UNDEF;
+ size_t new_len = 0;
+ size_t new_compressed_len = 0;
H5C_cache_entry_t * entry_ptr = NULL;
herr_t ret_value = SUCCEED; /* Return value */
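The new entry_size_change_ptr argument follows a common optional out-parameter convention: the caller passes NULL if it does not care, or the address of an int64_t that the callee initializes and then fills with the size delta it observed. A minimal standalone sketch of that calling convention (hypothetical function, not H5C_flush_single_entry itself):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical flush helper: reports the size change it observed through
     * an optional out parameter (NULL means the caller does not care).
     */
    static int flush_one(size_t *entry_size, int64_t *size_change_ptr)
    {
        size_t old_size = *entry_size;
        size_t new_size = old_size + 16;   /* pretend a pre-serialize resize */

        if (size_change_ptr != NULL)
            *size_change_ptr = 0;          /* always initialize the output   */

        *entry_size = new_size;

        if (size_change_ptr != NULL)
            *size_change_ptr = (int64_t)new_size - (int64_t)old_size;

        return 0;
    }

    int main(void)
    {
        size_t  size  = 64;
        int64_t delta = 0;

        flush_one(&size, &delta);   /* caller that accumulates size deltas */
        assert(delta == 16);

        flush_one(&size, NULL);     /* caller that ignores them            */
        return 0;
    }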
@@ -8253,11 +8844,15 @@ H5C_flush_single_entry(H5F_t * f,
HDassert( cache_ptr );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
HDassert( H5F_addr_defined(addr) );
- HDassert( first_flush_ptr );
+
+ /* If defined, initialize *entry_size_change_ptr to 0 */
+ if(entry_size_change_ptr != NULL)
+ *entry_size_change_ptr = 0;
/* setup external flags from the flags parameter */
destroy = ((flags & H5C__FLUSH_INVALIDATE_FLAG) != 0);
clear_only = ((flags & H5C__FLUSH_CLEAR_ONLY_FLAG) != 0);
+ free_file_space = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0);
take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0);
/* Set the flag for destroying the entry, based on the 'take ownership'
@@ -8271,6 +8866,14 @@ H5C_flush_single_entry(H5F_t * f,
/* attempt to find the target entry in the hash table */
H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
+ /* we will write the entry to disk if it exists, is dirty, and if the
+ * clear only flag is not set.
+ */
+ if ( ( entry_ptr != NULL ) && ( entry_ptr->is_dirty ) && ( ! clear_only ) )
+ write_entry = TRUE;
+ else
+ write_entry = FALSE;
+
/* run initial sanity checks */
#if H5C_DO_SANITY_CHECKS
if ( entry_ptr != NULL ) {
@@ -8279,6 +8882,8 @@ H5C_flush_single_entry(H5F_t * f,
if ( entry_ptr->in_slist ) {
+ HDassert(entry_ptr->is_dirty);
+
if ( ( ( entry_ptr->flush_marker ) && ( ! entry_ptr->is_dirty ) ) ||
( entry_ptr->addr != addr ) ) {
@@ -8287,6 +8892,10 @@ H5C_flush_single_entry(H5F_t * f,
}
} else {
+ HDassert(!entry_ptr->is_dirty);
+ HDassert(!entry_ptr->flush_marker);
+ HDassert(entry_ptr->addr == addr);
+
if ( ( entry_ptr->is_dirty ) ||
( entry_ptr->flush_marker ) ||
( entry_ptr->addr != addr ) ) {
@@ -8300,22 +8909,32 @@ H5C_flush_single_entry(H5F_t * f,
if ( ( entry_ptr != NULL ) && ( entry_ptr->is_protected ) )
{
+ HDassert(!entry_ptr->is_protected);
/* Attempt to flush a protected entry -- scream and die. */
HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, \
"Attempt to flush a protected entry.")
}
- if ( ( entry_ptr != NULL ) &&
- ( ( type_ptr == NULL ) || ( type_ptr->id == entry_ptr->type->id ) ) )
+ /* if the entry exists, set entry_ptr->flush_in_progress = TRUE
+ * and set entry_ptr->flush_marker = FALSE
+ *
+ * in the parallel case, do some sanity checking in passing.
+ */
+ if ( entry_ptr != NULL )
{
/* we have work to do */
+ HDassert(entry_ptr->type);
+
+ was_dirty = entry_ptr->is_dirty; /* needed later for logging */
/* We will set flush_in_progress back to FALSE at the end if the
* entry still exists at that point.
*/
entry_ptr->flush_in_progress = TRUE;
+ entry_ptr->flush_marker = FALSE;
+
#ifdef H5_HAVE_PARALLEL
#ifndef NDEBUG
/* If MPI based VFD is used, do special parallel I/O sanity checks.
@@ -8329,7 +8948,7 @@ H5C_flush_single_entry(H5F_t * f,
unsigned coll_meta; /* Collective metadata write flag */
/* Get the dataset transfer property list */
- if(NULL == (dxpl = H5I_object(primary_dxpl_id)))
+ if(NULL == (dxpl = H5I_object(dxpl_id)))
HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, FAIL, "not a dataset transfer property list")
/* Get the collective metadata write property */
@@ -8341,278 +8960,647 @@ H5C_flush_single_entry(H5F_t * f,
} /* end if */
#endif /* NDEBUG */
#endif /* H5_HAVE_PARALLEL */
+ }
- was_dirty = entry_ptr->is_dirty;
- type_id = entry_ptr->type->id;
-
- entry_ptr->flush_marker = FALSE;
-
- if ( clear_only ) {
- H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
- } else {
- H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr)
- }
-
- if ( destroy ) {
- H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr)
- }
+ if ( ( entry_ptr != NULL ) && ( write_entry ) )
+ {
+ /* serialize the entry if necessary, and then write it to disk. */
- /* If the entry's type has a 'notify' callback and the entry is about
- * to be removed from the cache, send a 'before eviction' notice while
- * the entry is still fully integrated in the cache.
- */
- if(destroy) {
- if(entry_ptr->type->notify &&
- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict")
- } /* end if */
+ unsigned serialize_flags = H5C__SERIALIZE_NO_FLAGS_SET;
- /* Always remove the entry from the hash table on a destroy. On a
- * flush with destroy, it is cheaper to discard the skip list all at
- * once rather than remove the entries one by one, so we only delete
- * from the slist only if requested.
+ /* The entry is dirty, and we are doing either a flush,
+ * or a flush destroy. In either case, serialize the
+ * entry and write it to disk.
*
- * We must do deletions now as the callback routines will free the
- * entry if destroy is true.
+ * Note that this may cause the entry to be re-sized and/or
+ * moved in the cache.
*
- * Note that it is possible that the entry will be moved during
- * its call to flush. This will upset H5C_move_entry() if we
- * don't tell it that it doesn't have to worry about updating the
- * index and SLIST. Use the destroy_in_progress field for this
- * purpose.
+ * As we will not update the metadata cache's data structures
+         * until we finish the write, we must touch up these
+ * data structures for size and location changes even if we
+ * are about to delete the entry from the cache (i.e. on a
+ * flush destroy).
*/
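In outline, the write path described above is: make sure an on-disk image buffer exists, bring it up to date via the serialize callback, then write the image at the entry's (possibly updated) address. A compressed standalone sketch of that sequence, with hypothetical types and a plain stdio write in place of the HDF5 VFD layer:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical cached entry with an on-disk image -- illustrative only. */
    struct entry {
        uint64_t addr;
        size_t   size;
        void    *image;
        int      image_up_to_date;
        int    (*serialize)(struct entry *e, void *image, size_t len);
    };

    /* Serialize (if needed) and write the entry's image at its address. */
    static int write_entry(FILE *fp, struct entry *e)
    {
        if (e->image == NULL) {
            e->image = malloc(e->size);                  /* allocate the image */
            if (e->image == NULL)
                return -1;
        }

        if (!e->image_up_to_date) {
            if (e->serialize(e, e->image, e->size) < 0)  /* fill the image     */
                return -1;
            e->image_up_to_date = 1;
        }

        if (fseek(fp, (long)e->addr, SEEK_SET) != 0)     /* seek to the entry  */
            return -1;
        if (fwrite(e->image, 1, e->size, fp) != e->size) /* write the image    */
            return -1;

        return 0;
    }

    /* Trivial serializer for the demo: zero-fill the image. */
    static int serialize_zeros(struct entry *e, void *image, size_t len)
    {
        (void)e;
        memset(image, 0, len);
        return 0;
    }

    int main(void)
    {
        FILE        *fp = tmpfile();
        struct entry e  = { 0, 32, NULL, 0, serialize_zeros };

        if (fp == NULL)
            return 1;
        if (write_entry(fp, &e) < 0)
            return 1;
        free(e.image);
        fclose(fp);
        return 0;
    }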
- if ( destroy ) {
- entry_ptr->destroy_in_progress = TRUE;
+ HDassert(entry_ptr->is_dirty);
- H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr)
+#if H5C_DO_SANITY_CHECKS
+ if ( ( cache_ptr->check_write_permitted == NULL ) &&
+ ( !(cache_ptr->write_permitted) ) )
- if ( ( entry_ptr->in_slist ) &&
- ( del_entry_from_slist_on_destroy ) ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Write when writes are always forbidden!?!?!")
+#endif /* H5C_DO_SANITY_CHECKS */
- H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr)
+ if ( NULL == entry_ptr->image_ptr )
+ {
+ size_t image_size;
+
+ if ( entry_ptr->compressed )
+ image_size = entry_ptr->compressed_size;
+ else
+ image_size = entry_ptr->size;
+
+ HDassert(image_size > 0);
+
+ entry_ptr->image_ptr =
+ H5MM_malloc(image_size + H5C_IMAGE_EXTRA_SPACE);
+
+ if ( NULL == entry_ptr->image_ptr)
+ {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, \
+ "memory allocation failed for on disk image buffer")
}
- }
+#if H5C_DO_MEMORY_SANITY_CHECKS
- /* Update the replacement policy for the flush or eviction.
- * Again, do this now so we don't have to reference freed
- * memory in the destroy case.
- */
- if ( destroy ) { /* AKA eviction */
+ HDmemcpy(((uint8_t *)entry_ptr->image_ptr) + image_size,
+ H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
- H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL)
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- } else {
+ } /* end if */
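The H5C_DO_MEMORY_SANITY_CHECKS code above over-allocates the image by H5C_IMAGE_EXTRA_SPACE and stamps a known pattern just past the payload; after serialization the pattern is compared again (see the HDmemcmp further down) to catch buffer overruns. The same guard-byte idea in a standalone sketch, with a hypothetical pattern and sizes rather than the HDF5 macros:

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    #define GUARD_SPACE 8
    static const unsigned char guard_pattern[GUARD_SPACE] =
        { 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef };

    int main(void)
    {
        size_t         image_size = 128;
        unsigned char *image;

        /* over-allocate and stamp the guard bytes just past the payload */
        image = malloc(image_size + GUARD_SPACE);
        if (image == NULL)
            return 1;
        memcpy(image + image_size, guard_pattern, GUARD_SPACE);

        /* ... a serializer may write at most image_size bytes here ... */
        memset(image, 0, image_size);

        /* if the guard bytes changed, something wrote past the buffer */
        assert(memcmp(image + image_size, guard_pattern, GUARD_SPACE) == 0);

        free(image);
        return 0;
    }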
- H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, FAIL)
- }
+ if ( ! (entry_ptr->image_up_to_date) )
+ {
+ /* reset cache_ptr->slist_changed so we can detect slist
+ * modifications in the pre_serialize call.
+ */
+ cache_ptr->slist_changed = FALSE;
+
+ /* make note of the entry's current address */
+ old_addr = entry_ptr->addr;
+
+ /* Call client's pre-serialize callback, if there's one */
+ if ( ( entry_ptr->type->pre_serialize != NULL ) &&
+ ( (entry_ptr->type->pre_serialize)(f, dxpl_id,
+ (void *)entry_ptr,
+ entry_ptr->addr,
+ entry_ptr->size,
+ entry_ptr->compressed_size,
+ &new_addr, &new_len,
+ &new_compressed_len,
+ &serialize_flags) < 0 ) )
+ {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "unable to pre-serialize entry")
+ }
- /* Clear the dirty flag only, if requested */
- if ( clear_only ) {
+ /* set cache_ptr->slist_change_in_pre_serialize if the
+ * slist was modified.
+ */
+ if ( cache_ptr->slist_changed )
+ cache_ptr->slist_change_in_pre_serialize = TRUE;
- if ( destroy ) {
-#ifndef NDEBUG
- /* we are about to call the clear callback with the
- * destroy flag set -- this will result in *entry_ptr
- * being freed. Set the magic field to bad magic
- * so we can detect a freed cache entry if we see
- * one.
+ /* Check for any flags set in the pre-serialize callback */
+ if ( serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET )
+ {
+ /* Check for unexpected flags from serialize callback */
+ if ( serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG |
+ H5C__SERIALIZE_MOVED_FLAG |
+ H5C__SERIALIZE_COMPRESSED_FLAG))
+ {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "unknown serialize flag(s)")
+ }
+#ifdef H5_HAVE_PARALLEL
+ /* In the parallel case, resizes and moves in
+ * the serialize operation can cause problems.
+ * If they occur, scream and die.
+ *
+ * At present, in the parallel case, the aux_ptr
+ * will only be set if there is more than one
+ * process. Thus we can use this to detect
+ * the parallel case.
+ *
+ * This works for now, but if we start using the
+ * aux_ptr for other purposes, we will have to
+ * change this test accordingly.
+ *
+		 * NB: While this test detects entries that attempt
+ * to resize or move themselves during a flush
+ * in the parallel case, it will not detect an
+ * entry that dirties, resizes, and/or moves
+ * other entries during its flush.
+ *
+ * From what Quincey tells me, this test is
+ * sufficient for now, as any flush routine that
+ * does the latter will also do the former.
+ *
+ * If that ceases to be the case, further
+ * tests will be necessary.
+ */
+ if ( cache_ptr->aux_ptr != NULL )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+                        "resize/move in serialize occurred in parallel case.")
+#endif /* H5_HAVE_PARALLEL */
+
+ /* Resize the buffer if required */
+ if ( ( ( ! entry_ptr->compressed ) &&
+ ( serialize_flags & H5C__SERIALIZE_RESIZED_FLAG ) ) ||
+ ( ( entry_ptr->compressed ) &&
+ ( serialize_flags & H5C__SERIALIZE_COMPRESSED_FLAG ) ) )
+ {
+ size_t new_image_size;
+
+ if ( entry_ptr->compressed )
+ new_image_size = new_compressed_len;
+ else
+ new_image_size = new_len;
+
+ HDassert(new_image_size > 0);
+
+ /* Release the current image */
+ if ( entry_ptr->image_ptr )
+ {
+ entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
+ }
+
+ /* Allocate a new image buffer */
+ entry_ptr->image_ptr =
+ H5MM_malloc(new_image_size + H5C_IMAGE_EXTRA_SPACE);
+
+ if ( NULL == entry_ptr->image_ptr )
+ {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, \
+ "memory allocation failed for on disk image buffer")
+ }
+
+#if H5C_DO_MEMORY_SANITY_CHECKS
+
+ HDmemcpy(((uint8_t *)entry_ptr->image_ptr) + new_image_size,
+ H5C_IMAGE_SANITY_VALUE,
+ H5C_IMAGE_EXTRA_SPACE);
+
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+
+ } /* end if */
+
+ /* If required, update the entry and the cache data structures
+ * for a resize.
*/
- entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
-#endif /* NDEBUG */
- entry_ptr->cache_ptr = NULL;
- }
- /* Call the callback routine to clear all dirty flags for object */
- if ( (entry_ptr->type->clear)(f, entry_ptr, destroy_entry) < 0 ) {
+ if ( serialize_flags & H5C__SERIALIZE_RESIZED_FLAG ) {
+
+ H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, \
+ entry_ptr, new_len)
+
+ /* update the hash table for the size change*/
+ H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, \
+ entry_ptr->size, \
+ new_len, entry_ptr, \
+ !(entry_ptr->is_dirty));
+
+ /* The entry can't be protected since we are
+ * in the process of flushing it. Thus we must
+ * update the replacement policy data
+ * structures for the size change. The macro
+ * deals with the pinned case.
+ */
+ H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear entry")
- }
- } else {
+                    /* as we haven't updated the cache data structures
+                     * for the flush or flush destroy yet, the entry should
+ * be in the slist. Thus update it for the size change.
+ */
+ HDassert(entry_ptr->in_slist);
+ H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, \
+ new_len)
+
+ /* if defined, update *entry_size_change_ptr for the
+ * change in entry size.
+ */
+ if(entry_size_change_ptr != NULL)
+ *entry_size_change_ptr = (int64_t)new_len - (int64_t)(entry_ptr->size);
+ /* finally, update the entry for its new size */
+ entry_ptr->size = new_len;
+ } /* end if */
+
+                /* If required, update the entry and the cache data structures
+ * for a move
+ */
+ if(serialize_flags & H5C__SERIALIZE_MOVED_FLAG)
+ {
#if H5C_DO_SANITY_CHECKS
- if ( ( entry_ptr->is_dirty ) &&
- ( cache_ptr->check_write_permitted == NULL ) &&
- ( ! (cache_ptr->write_permitted) ) )
+ int64_t saved_slist_len_increase;
+ int64_t saved_slist_size_increase;
+#endif /* H5C_DO_SANITY_CHECKS */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Write when writes are always forbidden!?!?!")
+ H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)
+
+ if ( entry_ptr->addr == old_addr )
+ {
+ /* we must update cache data structures for the
+ * change in address.
+ */
+
+ /* delete the entry from the hash table and the slist */
+ H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr)
+ H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr)
+
+ /* update the entry for its new address */
+ entry_ptr->addr = new_addr;
+
+ /* and then reinsert in the index and slist */
+ H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
+
+#if H5C_DO_SANITY_CHECKS
+ /* save cache_ptr->slist_len_increase and
+ * cache_ptr->slist_size_increase before the
+ * reinsertion into the slist, and restore
+ * them afterwards to avoid skewing our sanity
+ * checking.
+ */
+ saved_slist_len_increase = cache_ptr->slist_len_increase;
+ saved_slist_size_increase = cache_ptr->slist_size_increase;
#endif /* H5C_DO_SANITY_CHECKS */
- if ( destroy ) {
-#ifndef NDEBUG
- /* we are about to call the flush callback with the
- * destroy flag set -- this will result in *entry_ptr
- * being freed. Set the magic field to bad magic
- * so we can detect a freed cache entry if we see
- * one.
- */
- entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
-#endif /* NDEBUG */
- entry_ptr->cache_ptr = NULL;
- }
+ H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
- /* Only block for all the processes on the first piece of metadata
- */
+#if H5C_DO_SANITY_CHECKS
+ cache_ptr->slist_len_increase = saved_slist_len_increase;
+ cache_ptr->slist_size_increase = saved_slist_size_increase;
+#endif /* H5C_DO_SANITY_CHECKS */
+ }
+                    else /* move is already done for us -- just do sanity checks */
+ {
+ HDassert(entry_ptr->addr == new_addr);
+ }
+ } /* end if */
- if ( *first_flush_ptr && entry_ptr->is_dirty ) {
+ if ( serialize_flags & H5C__SERIALIZE_COMPRESSED_FLAG ) {
+ /* just save the new compressed entry size in
+ * entry_ptr->compressed_size. We don't need to
+ * do more, as compressed size is only used for I/O.
+ */
+ HDassert(entry_ptr->compressed);
+ entry_ptr->compressed_size = new_compressed_len;
+ }
+ } /* end if ( serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET ) */
- status = (entry_ptr->type->flush)(f, primary_dxpl_id, destroy_entry,
- entry_ptr->addr, entry_ptr,
- &flush_flags);
- *first_flush_ptr = FALSE;
+ /* Serialize object into buffer */
+ {
+ size_t image_len;
- } else {
+ if ( entry_ptr->compressed )
+ image_len = entry_ptr->compressed_size;
+ else
+ image_len = entry_ptr->size;
+
+ /* reset cache_ptr->slist_changed so we can detect slist
+ * modifications in the serialize call.
+ */
+ cache_ptr->slist_changed = FALSE;
- status = (entry_ptr->type->flush)(f, secondary_dxpl_id,
- destroy_entry, entry_ptr->addr,
- entry_ptr, &flush_flags);
+
+ if ( entry_ptr->type->serialize(f, entry_ptr->image_ptr,
+ image_len,
+ (void *)entry_ptr) < 0 )
+ {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "unable to serialize entry")
+ }
+
+ /* set cache_ptr->slist_change_in_serialize if the
+ * slist was modified.
+ */
+ if ( cache_ptr->slist_changed )
+ cache_ptr->slist_change_in_serialize = TRUE;
+
+#if H5C_DO_MEMORY_SANITY_CHECKS
+
+ HDassert(0 == HDmemcmp(((uint8_t *)entry_ptr->image_ptr) +
+ image_len,
+ H5C_IMAGE_SANITY_VALUE,
+ H5C_IMAGE_EXTRA_SPACE));
+
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+
+ entry_ptr->image_up_to_date = TRUE;
}
+ } /* end if ( ! (entry_ptr->image_up_to_date) ) */
+
+ /* Finally, write the image to disk.
+ *
+ * Note that if either the H5C__CLASS_NO_IO_FLAG or the
+ * H5C__CLASS_SKIP_WRITES flag is set in the entry's
+ * type, we silently skip the write. These flags should
+ * only be used in test code.
+ */
+ if ( ( ((entry_ptr->type->flags) & H5C__CLASS_NO_IO_FLAG) == 0 ) &&
+ ( ((entry_ptr->type->flags) & H5C__CLASS_SKIP_WRITES) == 0 ) )
+ {
+ /* If compression is not enabled, the size of the entry on
+ * disk is entry_ptr->size. However if entry_ptr->compressed
+ * is TRUE, the on disk size is entry_ptr->compressed_size.
+ */
+ size_t image_size;
- if ( status < 0 ) {
+ if ( entry_ptr->compressed )
+ image_size = entry_ptr->compressed_size;
+ else
+ image_size = entry_ptr->size;
+ if ( ( H5F_block_write(f, entry_ptr->type->mem_type,
+ entry_ptr->addr,
+ image_size, dxpl_id,
+ entry_ptr->image_ptr) < 0 ) )
+ {
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
- "unable to flush entry")
+ "Can't write image to file.")
}
+ }
-#ifdef H5_HAVE_PARALLEL
- if ( flush_flags != H5C_CALLBACK__NO_FLAGS_SET ) {
-
- /* In the parallel case, flush operations can
- * cause problems. If they occur, scream and
- * die.
- *
- * At present, in the parallel case, the aux_ptr
- * will only be set if there is more than one
- * process. Thus we can use this to detect
- * the parallel case.
- *
- * This works for now, but if we start using the
- * aux_ptr for other purposes, we will have to
- * change this test accordingly.
- *
- * NB: While this test detects entryies that attempt
- * to resize or move themselves during a flush
- * in the parallel case, it will not detect an
- * entry that dirties, resizes, and/or moves
- * other entries during its flush.
- *
- * From what Quincey tells me, this test is
- * sufficient for now, as any flush routine that
- * does the latter will also do the former.
- *
- * If that ceases to be the case, further
- * tests will be necessary.
- */
- if ( cache_ptr->aux_ptr != NULL )
+ /* if the entry has a notify callback, notify it that we have
+ * just flushed the entry.
+ */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "resize/move in serialize occured in parallel case.")
+ if ( ( entry_ptr->type->notify ) &&
+ ( (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_FLUSH,
+ entry_ptr) < 0 ) )
+ {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, \
+ "can't notify client of entry flush")
+ }
+ } /* if ( ( entry_ptr != NULL ) && ( write_entry ) ) */
+
+
+ /* At this point, all pre-serialize and serialize calls have been
+ * made if it was appropriate to make them. Similarly, the entry
+ * has been written to disk if desired.
+ *
+ * Thus it is now safe to update the cache data structures for the
+ * flush.
+ */
+
+ if ( entry_ptr != NULL )
+ {
+ /* start by updating the statistics */
+
+ if ( clear_only ) {
+
+ /* only log a clear if the entry was dirty */
+ if ( was_dirty ) {
+
+ H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
+ }
+ } else if ( write_entry ) {
+
+ HDassert( was_dirty );
+
+ /* only log a flush if we actually wrote to disk */
+ H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr)
- }
-#endif /* H5_HAVE_PARALLEL */
}
- if ( ( ! destroy ) && ( entry_ptr->in_slist ) ) {
+ if ( destroy )
+ {
+ if ( take_ownership )
+ {
+ HDassert(!destroy_entry);
+ }
+ else
+ {
+ HDassert(destroy_entry);
+ }
- H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr)
+ H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, \
+ take_ownership)
+ }
+
+ /* If the entry's type has a 'notify' callback and the entry is about
+ * to be removed from the cache, send a 'before eviction' notice while
+ * the entry is still fully integrated in the cache.
+ */
+ if ( destroy ) {
+
+ if ( ( entry_ptr->type->notify ) &&
+ ( (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT,
+ entry_ptr) < 0 ) )
+ {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, \
+ "can't notify client about entry to evict")
+ }
+ } /* end if */
+
+
+ /* Update the cache internal data structures. */
+ if ( destroy )
+ {
+ /* Update the cache internal data structures as appropriate
+ * for a destroy. Specifically:
+ *
+ * 1) Delete it from the index
+ *
+ * 2) Delete it from the skip list if requested.
+ *
+ * 3) Update the replacement policy for eviction
+ *
+ * Finally, if the destroy_entry flag is set, discard the
+ * entry.
+ */
+
+ H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr)
+
+ if ( ( entry_ptr->in_slist ) &&
+ ( del_entry_from_slist_on_destroy ) ) {
+ H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr)
+ }
+
+ H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL)
}
+ else
+ {
+ HDassert(clear_only || write_entry);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(entry_ptr->in_slist);
- if ( ( ! destroy ) && ( was_dirty ) ) {
+ /* We are either doing a flush or a clear.
+ *
+ * A clear and a flush are the same from the point of
+ * view of the replacement policy and the slist.
+ * Hence no differentiation between them.
+ *
+ * JRM -- 7/7/07
+ */
+
+ H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, FAIL)
+
+ H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr)
+
+
+ /* mark the entry as clean and update the index for
+ * entry clean. Also, call the clear callback
+ * if defined.
+ */
+
+ entry_ptr->is_dirty = FALSE;
H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr);
+
+ if ( ( entry_ptr->type->clear != NULL ) &&
+ ( (entry_ptr->type->clear)(f, (void *)entry_ptr, FALSE) ) )
+ {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "unable to clear entry")
+ }
}
- if ( ! destroy ) { /* i.e. if the entry still exists */
+ /* reset the flush_in_progress flag */
- HDassert( !(entry_ptr->is_dirty) );
- HDassert( !(entry_ptr->flush_marker) );
- HDassert( !(entry_ptr->in_slist) );
- HDassert( !(entry_ptr->is_protected) );
- HDassert( !(entry_ptr->is_read_only) );
- HDassert( (entry_ptr->ro_ref_count) == 0 );
+ entry_ptr->flush_in_progress = FALSE;
- if ( (flush_flags & H5C_CALLBACK__SIZE_CHANGED_FLAG) != 0 ) {
+ } /* end if */
- /* The entry size changed as a result of the flush.
- *
- * Most likely, the entry was compressed, and the
- * new version is of a different size than the old.
- *
- * In any case, we must update entry and cache size
- * accordingly.
- */
- size_t new_size;
- if ( (entry_ptr->type->size)(f, (void *)entry_ptr, &new_size)
- < 0 ) {
+ /* Internal cache data structures should now be up to date, and
+ * consistent with the status of the entry.
+ *
+ * Now discard the entry if appropriate.
+ */
+ if ( entry_ptr != NULL )
+ {
+ if ( destroy )
+ {
+ /* start by freeing the buffer for the on disk image */
+ if(entry_ptr->image_ptr != NULL)
+ entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
+
+ /* Check whether we should free the space in the file that
+ * the entry occupies
+ */
+ if ( free_file_space )
+ {
+
+ size_t fsf_size;
+
+ /* Sanity checks */
+ HDassert(H5F_addr_defined(entry_ptr->addr));
+ HDassert(!H5F_IS_TMP_ADDR(f, entry_ptr->addr));
+#ifndef NDEBUG
+{
+ hbool_t curr_compressed = FALSE;
+ size_t curr_len;
+ size_t curr_compressed_len = 0;
+
+ /* Get the actual image size for the thing again */
+ entry_ptr->type->image_len((void *)entry_ptr, &curr_len, &curr_compressed, &curr_compressed_len);
+ HDassert(curr_len == entry_ptr->size);
+ HDassert(curr_compressed == entry_ptr->compressed);
+ HDassert(curr_compressed_len == entry_ptr->compressed_size);
+}
+#endif /* NDEBUG */
+
+ /* if the file space free size callback is defined, use
+ * it to get the size of the block of file space to free.
+ * Otherwise use entry_ptr->compressed_size if
+ * entry_ptr->compressed == TRUE, and entry_ptr->size
+ * if entry_ptr->compressed == FALSE.
+ */
+ if ( entry_ptr->type->fsf_size )
+ {
+ if ( (entry_ptr->type->fsf_size)((void *)entry_ptr,
+ &fsf_size) < 0 )
+ {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, \
+ "unable to get file space free size")
+ }
+ }
+ else if ( entry_ptr->compressed ) /* use compressed size */
+ {
+ fsf_size = entry_ptr->compressed_size;
+ }
+ else /* no file space free size callback -- use entry size */
+ {
+ fsf_size = entry_ptr->size;
+ }
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGETSIZE, FAIL, \
- "Can't get entry size after flush")
+ /* Release the space on disk */
+ if ( H5MF_xfree(f, entry_ptr->type->mem_type, dxpl_id,
+ entry_ptr->addr, (hsize_t)fsf_size) < 0)
+ {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, \
+ "unable to free file space for cache entry")
}
+ } /* end if ( free_file_space ) */
- if ( new_size != entry_ptr->size ) {
- HDassert( entry_ptr->size < H5C_MAX_ENTRY_SIZE );
+ /* Reset the pointer to the cache the entry is within. -QAK */
+ entry_ptr->cache_ptr = NULL;
- /* update the hash table for the size change
- * We pass TRUE as the was_clean parameter, as we
- * have already updated the clean and dirty index
- * size fields for the fact that the entry has
- * been flushed. (See above call to
- * H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN()).
- */
- H5C__UPDATE_INDEX_FOR_SIZE_CHANGE((cache_ptr), \
- (entry_ptr->size), \
- (new_size), \
- (entry_ptr), \
- (TRUE))
-
- /* The entry can't be protected since we just flushed it.
- * Thus we must update the replacement policy data
- * structures for the size change. The macro deals
- * with the pinned case.
- */
- H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, \
- new_size)
+ /* increment entries_removed_counter and set
+ * last_entry_removed_ptr. As we are likely about to
+ * free the entry, recall that last_entry_removed_ptr
+ * must NEVER be dereferenced.
+ *
+ * Recall that these fields are maintained to allow functions
+ * that perform scans of lists of entries to detect the
+ * unexpected removal of entries (via expunge, eviction,
+ * or take ownership at present), so that they can re-start
+ * their scans if necessary.
+ */
+ cache_ptr->entries_removed_counter++;
+ cache_ptr->last_entry_removed_ptr = entry_ptr;
- /* The entry can't be in the slist, so no need to update
- * the slist for the size change.
- */
+ /* Check for actually destroying the entry in memory */
+ /* (As opposed to taking ownership of it) */
+ if ( destroy_entry )
+ {
+ /* if the entry is dirty and it has a clear callback,
+ * call this callback now. Since this callback exists,
+ * it follows that the client maintains its own dirty bits,
+ * which must be cleared before the entry is freed to avoid
+ * sanity check failures. Also clear the dirty flag for
+ * the same reason.
+ */
- /* update stats for the size change */
- H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, \
- entry_ptr, \
- new_size)
+ if ( entry_ptr->is_dirty ) {
- /* finally, update the entry size proper */
- entry_ptr->size = new_size;
- }
- }
+ entry_ptr->is_dirty = FALSE;
- if ( (flush_flags & H5C_CALLBACK__MOVED_FLAG) != 0 ) {
-
- /* The entry was moved as the result of the flush.
- *
- * Most likely, the entry was compressed, and the
- * new version is larger than the old and thus had
- * to be relocated.
- *
- * At preset, all processing for this case is
- * handled elsewhere. But lets keep the if statement
- * around just in case.
- */
+ if ( ( entry_ptr->type->clear != NULL ) &&
+ ( (entry_ptr->type->clear)(f, (void *)entry_ptr, TRUE) ) )
+ {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "unable to clear entry")
+ }
+ }
- }
+ /* we are about to discard the in core representation --
+ * set the magic field to bad magic so we can detect a
+ * freed entry if we see one.
+ */
+ entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
- /* reset the flush_in progress flag */
- entry_ptr->flush_in_progress = FALSE;
- }
+ /* verify that the image has been freed */
+ HDassert( entry_ptr->image_ptr == NULL );
+
+ if ( entry_ptr->type->free_icr((void *)entry_ptr) < 0 )
+ {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "free_icr callback failed.")
+ }
+ }
+ else
+ {
+ HDassert(take_ownership);
+
+ /* client is taking ownership of the entry.
+ * set bad magic here too so the cache will choke
+ * unless the entry is re-inserted properly
+ */
+ entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
+ }
+ } /* if ( destroy ) */
+ }
+
+ if ( entry_ptr != NULL )
+ {
if ( cache_ptr->log_flush ) {
- status = (cache_ptr->log_flush)(cache_ptr, addr, was_dirty,
- flags, type_id);
+ status = (cache_ptr->log_flush)(cache_ptr, addr, was_dirty, flags);
if ( status < 0 ) {
@@ -8623,8 +9611,14 @@ H5C_flush_single_entry(H5F_t * f,
}
done:
- HDassert( ( destroy ) ||
+
+ HDassert( ( ret_value != SUCCEED ) || ( destroy_entry ) ||
( ( entry_ptr ) && ( ! entry_ptr->flush_in_progress ) ) );
+
+ HDassert( ( ret_value != SUCCEED ) || ( destroy_entry ) ||
+ ( take_ownership ) ||
+ ( ( entry_ptr ) && ( ! entry_ptr->is_dirty ) ) );
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_flush_single_entry() */
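For orientation, the rewritten flush path above drives a client's serialize, notify, clear, fsf_size and free_icr callbacks, whose rough shapes can be read off the call sites in this hunk. The sketch below is illustrative only: the my_thing_t client and its "my_" functions are invented, and the signatures shown are approximations inferred from the call sites; the authoritative callback typedefs are those declared in H5Cprivate.h.

/* Hypothetical cache client, as seen from H5C_flush_single_entry() above.
 * The cache-managed fields come first so the cache can treat the thing
 * as an H5C_cache_entry_t.
 */
typedef struct my_thing_t {
    H5C_cache_entry_t cache_info;       /* managed by the metadata cache  */
    uint32_t          value;            /* client payload (illustrative)  */
} my_thing_t;

/* serialize: encode the in-core representation into the image buffer
 * (entry_ptr->image_ptr above) that the cache will write to disk.
 */
static herr_t
my_thing_serialize(const H5F_t *f, void *image, size_t len, void *thing)
{
    my_thing_t *mt = (my_thing_t *)thing;
    uint8_t *p = (uint8_t *)image;

    HDassert(len >= sizeof(uint32_t));
    UINT32ENCODE(p, mt->value);

    return SUCCEED;
}

/* notify: called with H5C_NOTIFY_ACTION_AFTER_FLUSH once the image has
 * been written, and with H5C_NOTIFY_ACTION_BEFORE_EVICT just before the
 * entry is removed from the cache, matching the two call sites above.
 */
static herr_t
my_thing_notify(H5C_notify_action_t action, void *thing)
{
    switch(action) {
        case H5C_NOTIFY_ACTION_AFTER_FLUSH:
        case H5C_NOTIFY_ACTION_BEFORE_EVICT:
            /* update any client-side bookkeeping here */
            break;
        default:
            break;
    }

    return SUCCEED;
}

/* free_icr: discard the in-core representation once the cache is done
 * with it (see the free_icr call in the destroy path above).
 */
static herr_t
my_thing_free_icr(void *thing)
{
    H5MM_xfree(thing);

    return SUCCEED;
}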
@@ -8654,8 +9648,19 @@ H5C_load_entry(H5F_t * f,
haddr_t addr,
void * udata)
{
+ hbool_t dirty = FALSE; /* Flag indicating whether thing was dirtied during deserialize */
+ hbool_t compressed = FALSE; /* flag indicating whether thing */
+ /* will be run through filters on */
+ /* read and write. Usually FALSE; */
+ /* set to TRUE if appropriate. */
+ size_t compressed_size = 0; /* entry compressed size if */
+ /* known -- otherwise zero. Zero */
+ /* indicates compression is not */
+ /* enabled. */
+ void * image = NULL; /* Buffer for disk image */
void * thing = NULL; /* Pointer to thing loaded */
H5C_cache_entry_t * entry; /* Alias for thing loaded, as cache entry */
+ size_t len; /* Size of image in file */
unsigned u; /* Local index variable */
void * ret_value; /* Return value */
@@ -8665,14 +9670,310 @@ H5C_load_entry(H5F_t * f,
HDassert(f->shared);
HDassert(f->shared->cache);
HDassert(type);
- HDassert(type->load);
- HDassert(type->size);
+
+ /* verify absence of prohibited or unsupported type flag combinations */
+ HDassert(!(type->flags & H5C__CLASS_NO_IO_FLAG));
+
+ /* for now, we do not combine the speculative load and compressed flags */
+ HDassert(!((type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) &&
+ (type->flags & H5C__CLASS_COMPRESSED_FLAG)));
+
+ /* Can't see how skip reads could be usefully combined with
+ * either the speculative read or compressed flags. Hence disallow.
+ */
+ HDassert(!((type->flags & H5C__CLASS_SKIP_READS) &&
+ (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)));
+ HDassert(!((type->flags & H5C__CLASS_SKIP_READS) &&
+ (type->flags & H5C__CLASS_COMPRESSED_FLAG)));
+
HDassert(H5F_addr_defined(addr));
+ HDassert(type->get_load_size);
+ HDassert(type->deserialize);
+
+ /* Call the get_load_size callback to retrieve the initial
+ * size of the image
+ */
+ if(type->get_load_size(udata, &len) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't retrieve image size")
+
+ HDassert(len > 0);
+
+ /* Check for possible speculative read off the end of the file */
+ if(type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) {
+
+/* Quincey has added patches for eoa calculations -- leave the original
+ * code around until we see the effect of these patches.
+ * JRM -- 1/1/15
+ */
+#if 0 /* original code */ /* JRM */
+ /* the original version of this code has several problems:
+ *
+ * First, the sblock is not available until the sblock
+ * has been read in, which causes a seg fault. This is
+ * dealt with easily enough by testing to see if
+ * f->shared->sblock is NULL, and calling H5FD_get_base_addr()
+ * to obtain the base addr when it is.
+ *
+ * The second issue is more subtle. H5F_get_eoa() calls
+ * H5FD_get_eoa(). However, this function returns the EOA as
+ * a relative address -- i.e. relative to the base address.
+ * This means that the base addr + addr < eoa sanity check will
+ * fail whenever the super block is not at address 0 when
+ * reading in the first chunk of the super block.
+ *
+ * To address these issues, I have rewritten the code to
+ * simply verify that the address plus length is less than
+ * the eoa. I think this is sufficient, but further testing
+ * should tell me if it isn't.
+ * JRM -- 8/29/14
+ */
+ haddr_t eoa; /* End-of-allocation in the file */
+ haddr_t base_addr; /* Base address of file data */
+
+ /* Get the file's end-of-allocation value */
+ eoa = H5F_get_eoa(f, type->mem_type);
+ HDassert(H5F_addr_defined(eoa));
+
+ /* Get the file's base address */
+ if ( f->shared->sblock )
+
+ base_addr = H5F_BASE_ADDR(f);
+
+ else { /* sblock not loaded yet -- use file driver info */
+
+ HDassert(f->shared->lf);
+ base_addr = H5FD_get_base_addr(f->shared->lf);
+
+ }
+ HDassert(H5F_addr_defined(base_addr));
+
+ /* Check for bad address in general */
+ if((addr + base_addr) > eoa)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, \
+ "address of object past end of allocation")
+
+ /* Check if the amount of data to read will be past the eoa */
+ if((addr + base_addr + len) > eoa)
+ /* Trim down the length of the metadata */
+ len = (size_t)(eoa - (addr + base_addr));
+
+#else /* modified code */ /* JRM */
+
+ haddr_t eoa; /* End-of-allocation in the file */
+ H5FD_mem_t cooked_type;
+
+ /* if type == H5FD_MEM_GHEAP, H5F_block_read() forces
+ * type to H5FD_MEM_DRAW via its call to H5F__accum_read().
+ * Thus we do the same for purposes of computing the eoa
+ * for sanity checks.
+ */
+ cooked_type =
+ (type->mem_type == H5FD_MEM_GHEAP) ? H5FD_MEM_DRAW : type->mem_type;
+
+ /* Get the file's end-of-allocation value */
+ eoa = H5F_get_eoa(f, cooked_type);
+
+ HDassert(H5F_addr_defined(eoa));
+
+ /* Check for bad address in general */
+ if ( H5F_addr_gt(addr, eoa) )
- if(NULL == (thing = (type->load)(f, dxpl_id, addr, udata)))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, \
+ "address of object past end of allocation")
- HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "unable to load entry")
+ /* Check if the amount of data to read will be past the eoa */
+ if( H5F_addr_gt((addr + len), eoa) ) {
+
+ /* Trim down the length of the metadata */
+
+ /* Note that for some cache clients, this will cause an
+ * assertion failure. JRM -- 8/29/14
+ */
+ len = (size_t)(eoa - addr);
+ }
+
+ if ( len <= 0 )
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, \
+ "len not positive after adjustment for EOA.")
+
+#endif /* modified code */ /* JRM */
+ }
+ /* Allocate the buffer for reading the on-disk entry image */
+ if(NULL == (image = H5MM_malloc(len + H5C_IMAGE_EXTRA_SPACE)))
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, \
+ "memory allocation failed for on disk image buffer.")
+
+#if H5C_DO_MEMORY_SANITY_CHECKS
+ HDmemcpy(((uint8_t *)image) + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+
+ /* Get the on-disk entry image */
+ if ( 0 == (type->flags & H5C__CLASS_SKIP_READS) )
+ if(H5F_block_read(f, type->mem_type, addr, len, dxpl_id, image) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image")
+
+ /* Deserialize the on-disk image into the native memory form */
+ if(NULL == (thing = type->deserialize(image, len, udata, &dirty)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't deserialize image")
+
+ /* If the client's cache has an image_len callback, check it */
+ if(type->image_len) {
+ size_t new_len; /* New size of on-disk image */
+
+ /* set magic and type field in *entry_ptr. While the image_len
+ * callback shouldn't touch the cache specific fields, it may check
+ * these fields to ensure that it has received the expected
+ * value.
+ *
+ * Note that this initialization is repeated below on the off
+ * chance that we had to re-try the deserialization.
+ */
+ entry = (H5C_cache_entry_t *)thing;
+ entry->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
+ entry->type = type;
+
+ /* verify that compressed and compressed_size are initialized */
+ HDassert(compressed == FALSE);
+ HDassert(compressed_size == 0);
+
+ /* Get the actual image size for the thing */
+ if(type->image_len(thing, &new_len, &compressed, &compressed_size) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, \
+ "can't retrieve image length")
+
+ if(new_len == 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "image length is 0")
+
+ HDassert(((type->flags & H5C__CLASS_COMPRESSED_FLAG) != 0) ||
+ ((compressed == FALSE) && (compressed_size == 0)));
+ HDassert((compressed == TRUE) || (compressed_size == 0));
+
+ if(new_len != len) {
+
+ if(type->flags & H5C__CLASS_COMPRESSED_FLAG) {
+
+ /* if new_len != len, then compression must be
+ * enabled on the entry. In this case, the image_len
+ * callback should have set compressed to TRUE, set
+ * new_len equal to the uncompressed size of the
+ * entry, and compressed_size equal to the compressed
+ * size -- which must equal len.
+ *
+ * We can't verify the uncompressed size, but we can
+ * verify the rest with the following assertions.
+ */
+ HDassert(compressed);
+ HDassert(compressed_size == len);
+
+ /* new_len should contain the uncompressed size. Set len
+ * equal to new_len, so that the cache will use the
+ * uncompressed size for purposes of space allocation, etc.
+ */
+ len = new_len;
+ } else if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) {
+
+ void *new_image; /* Buffer for disk image */
+
+ /* compressed must be FALSE, and compressed_size
+ * must be zero.
+ */
+ HDassert(!compressed);
+ HDassert(compressed_size == 0);
+
+ /* Adjust the size of the image to match new_len */
+ if(NULL == (new_image = H5MM_realloc(image,
+ new_len + H5C_IMAGE_EXTRA_SPACE)))
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, \
+ "image null after H5MM_realloc()")
+
+ image = new_image;
+
+#if H5C_DO_MEMORY_SANITY_CHECKS
+
+ HDmemcpy(((uint8_t *)image) + new_len,
+ H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
+
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+
+ /* If the thing's image needs to be bigger for a speculatively
+ * loaded thing, free the thing and retry with new length
+ */
+ if (new_len > len) {
+
+ /* Release previous (possibly partially initialized)
+ * thing. Note that we must set entry->magic to
+ * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC and set one or
+ * two other fields before the call to free_icr
+ * so as to avoid sanity check failures.
+ */
+ entry->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
+
+ entry->addr = addr;
+
+ if ( type->free_icr(thing) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, \
+ "free_icr callback failed")
+
+ /* Go get the on-disk image again */
+ if(H5F_block_read(f, type->mem_type, addr,
+ new_len, dxpl_id, image) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, \
+ "Can't read image")
+
+ /* Deserialize on-disk image into native memory
+ * form again
+ */
+ if(NULL == (thing = type->deserialize(image, new_len,
+ udata, &dirty)))
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, \
+ "Can't deserialize image")
+
+#ifndef NDEBUG
+ {
+ /* new_compressed and new_compressed_size must be
+ * initialized to FALSE / 0 respectively, as clients
+ * that don't use compression may ignore these two
+ * parameters.
+ */
+ hbool_t new_compressed = FALSE;
+ size_t new_compressed_size = 0;
+ size_t new_new_len;
+
+ /* Get the actual image size for the thing again. Note
+ * that since this is a new thing, we have to set
+ * the magic and type fields again so as to avoid
+ * failing sanity checks.
+ */
+ entry = (H5C_cache_entry_t *)thing;
+ entry->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
+ entry->type = type;
+
+ type->image_len(thing, &new_new_len, &new_compressed, &new_compressed_size);
+ HDassert(new_new_len == new_len);
+ HDassert(!new_compressed);
+ HDassert(new_compressed_size == 0);
+ }
+#endif /* NDEBUG */
+ } /* end if (new_len > len) */
+
+ /* Retain adjusted size */
+ len = new_len;
+
+ } else { /* throw an error */
+
+ HGOTO_ERROR(H5E_CACHE, H5E_UNSUPPORTED, NULL, \
+ "size of non-speculative, non-compressed object changed")
+ }
+ } /* end if (new_len != len) */
+ } /* end if */
entry = (H5C_cache_entry_t *)thing;
@@ -8680,35 +9981,39 @@ H5C_load_entry(H5F_t * f,
*
* However, when this code is used in the metadata cache, it is
* possible that object headers will be dirty at this point, as
- * the load function will alter object headers if necessary to
+ * the deserialize function will alter object headers if necessary to
* fix an old bug.
*
- * To support this bug fix, I have replace the old assert:
- *
- * HDassert( entry->is_dirty == FALSE );
+ * In the following assert:
*
- * with:
+ * HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) );
*
- * HDassert( ( entry->is_dirty == FALSE ) || ( type->id == 5 ) );
- *
- * Note that type id 5 is associated with object headers in the metadata
- * cache.
+ * note that type ids 5 & 6 are associated with object headers in the
+ * metadata cache.
*
* When we get to using H5C for other purposes, we may wish to
* tighten up the assert so that the loophole only applies to the
* metadata cache.
*/
- HDassert( ( entry->is_dirty == FALSE ) || ( type->id == 5 ) );
-#ifndef NDEBUG
+ HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6) );
+
entry->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
-#endif /* NDEBUG */
entry->cache_ptr = f->shared->cache;
entry->addr = addr;
+ entry->size = len;
+ HDassert(entry->size < H5C_MAX_ENTRY_SIZE);
+ entry->compressed = compressed;
+ entry->compressed_size = compressed_size;
+ entry->image_ptr = image;
+ entry->image_up_to_date = TRUE;
entry->type = type;
+ entry->is_dirty = dirty;
+ entry->dirtied = FALSE;
entry->is_protected = FALSE;
entry->is_read_only = FALSE;
entry->ro_ref_count = 0;
+ entry->is_pinned = FALSE;
entry->in_slist = FALSE;
entry->flush_marker = FALSE;
#ifdef H5_HAVE_PARALLEL
@@ -8717,13 +10022,6 @@ H5C_load_entry(H5F_t * f,
#endif /* H5_HAVE_PARALLEL */
entry->flush_in_progress = FALSE;
entry->destroy_in_progress = FALSE;
- entry->free_file_space_on_destroy = FALSE;
-
- if((type->size)(f, thing, &(entry->size)) < 0)
-
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGETSIZE, NULL, "Can't get size of thing")
-
- HDassert( entry->size < H5C_MAX_ENTRY_SIZE );
/* Initialize flush dependency height fields */
entry->flush_dep_parent = NULL;
@@ -8744,6 +10042,18 @@ H5C_load_entry(H5F_t * f,
ret_value = thing;
done:
+ /* Cleanup on error */
+ if(NULL == ret_value) {
+
+ /* Release resources */
+ if ( thing && type->free_icr(thing) < 0 )
+
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, \
+ "free_icr callback failed")
+
+ if(image)
+ image = H5MM_xfree(image);
+ } /* end if */
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_load_entry() */
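The load side of the protocol follows the same pattern: H5C_load_entry() now calls get_load_size to size the initial read, deserialize to build the in-core representation, and image_len to confirm (or, for speculative loads, revise) the on-disk size. A minimal sketch for the same hypothetical my_thing_t client follows; the signatures are inferred from the call sites above, and the authoritative typedefs live in H5Cprivate.h.

/* get_load_size: report the number of bytes to read for the initial
 * image. For a type with H5C__CLASS_SPECULATIVE_LOAD_FLAG set this may
 * be a lower bound that image_len later revises upward.
 */
static herr_t
my_thing_get_load_size(const void *udata, size_t *image_len)
{
    *image_len = sizeof(uint32_t);      /* fixed-size entry in this sketch */

    return SUCCEED;
}

/* deserialize: build the in-core representation from the on-disk image.
 * *dirty is set to TRUE only if the image had to be repaired on read.
 */
static void *
my_thing_deserialize(const void *image, size_t len, void *udata,
    hbool_t *dirty)
{
    const uint8_t *p = (const uint8_t *)image;
    my_thing_t *mt = NULL;

    HDassert(len >= sizeof(uint32_t));

    if(NULL == (mt = (my_thing_t *)H5MM_calloc(sizeof(my_thing_t))))
        return NULL;

    UINT32DECODE(p, mt->value);
    *dirty = FALSE;

    return mt;
}

/* image_len: report the actual on-disk size of the entry. A client that
 * does not use compression leaves *compressed / *compressed_len at
 * FALSE / 0, which is what the assertions above expect.
 */
static herr_t
my_thing_image_len(const void *thing, size_t *image_len,
    hbool_t *compressed, size_t *compressed_len)
{
    *image_len = sizeof(uint32_t);
    *compressed = FALSE;
    *compressed_len = 0;

    return SUCCEED;
}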
@@ -8782,36 +10092,45 @@ done:
*
* Programmer: John Mainzer, 5/14/04
*
- * JRM -- 11/13/08
- * Modified function to always observe the min_clean_size
- * whether we are maintaining the clean and dirt LRU lists
- * or not. To do this, we had to add the new clean_index_size
- * and dirty_index_size fields to H5C_t, and supporting code
- * as needed throughout the cache.
+ * Changes: Modified function to skip over entries with the
+ * flush_in_progress flag set. If this is not done,
+ * an infinite recursion is possible if the cache is
+ * full, and the pre-serialize or serialize routine
+ * attempts to load another entry.
*
- * The purpose of this modification is to avoid "metadata
- * blizzards" in the write only case. In such instances,
- * the cache was allowed to fill with dirty metadata. When
- * we finally needed to evict an entry to make space, we had
- * to flush out a whole cache full of metadata -- which has
- * interesting performance effects. We hope to avoid (or
- * perhaps more accurately hide) this effect by maintaining
- * the min_clean_size, which should force us to start flushing
- * entries long before we actually have to evict something
- * to make space.
+ * This error was exposed by a re-factor of the
+ * H5C_flush_single_entry() routine. However, it was
+ * a potential bug from the moment that entries were
+ * allowed to load other entries on flush.
+ *
+ * In passing, note that the primary and secondary dxpls
+ * mentioned in the comment above have been replaced by
+ * a single dxpl at some point, and thus the discussion
+ * above is somewhat obsolete. Date of this change is
+ * unknown.
+ *
+ * JRM -- 12/26/14
+ *
+ * Modified function to detect deletions of entries
+ * during a scan of the LRU, and where appropriate,
+ * restart the scan to avoid proceeding with a next
+ * entry that is no longer in the cache.
+ *
+ * Note the absence of checks after flushes of clean
+ * entries. As a second entry can only be removed
+ * by a call to the pre_serialize or serialize callback
+ * of the first, and as these callbacks will not be called
+ * on clean entries, no checks are needed.
+ *
+ * JRM -- 4/6/15
*
- * MAM -- 01/06/09
- * Added code to maintain clean_entries_skipped and total_entries
- * scanned statistics.
*-------------------------------------------------------------------------
*/
static herr_t
H5C_make_space_in_cache(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
+ hid_t dxpl_id,
size_t space_needed,
- hbool_t write_permitted,
- hbool_t * first_flush_ptr)
+ hbool_t write_permitted)
{
H5C_t * cache_ptr = f->shared->cache;
herr_t result;
@@ -8824,6 +10143,7 @@ H5C_make_space_in_cache(H5F_t * f,
size_t empty_space;
hbool_t prev_is_dirty = FALSE;
hbool_t didnt_flush_entry = FALSE;
+ hbool_t restart_scan;
H5C_cache_entry_t * entry_ptr;
H5C_cache_entry_t * prev_ptr;
H5C_cache_entry_t * next_ptr;
@@ -8834,13 +10154,12 @@ H5C_make_space_in_cache(H5F_t * f,
HDassert( f );
HDassert( cache_ptr );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- HDassert( first_flush_ptr != NULL );
- HDassert( ( *first_flush_ptr == TRUE ) || ( *first_flush_ptr == FALSE ) );
HDassert( cache_ptr->index_size ==
(cache_ptr->clean_index_size + cache_ptr->dirty_index_size) );
if ( write_permitted ) {
+ restart_scan = FALSE;
initial_list_len = cache_ptr->LRU_list_len;
entry_ptr = cache_ptr->LRU_tail_ptr;
@@ -8871,6 +10190,7 @@ H5C_make_space_in_cache(H5F_t * f,
( entry_ptr != NULL )
)
{
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert( ! (entry_ptr->is_protected) );
HDassert( ! (entry_ptr->is_read_only) );
HDassert( (entry_ptr->ro_ref_count) == 0 );
@@ -8883,7 +10203,8 @@ H5C_make_space_in_cache(H5F_t * f,
prev_is_dirty = prev_ptr->is_dirty;
}
- if ( (entry_ptr->type)->id != H5C__EPOCH_MARKER_TYPE ) {
+ if ( ( (entry_ptr->type)->id != H5C__EPOCH_MARKER_TYPE ) &&
+ ( ! entry_ptr->flush_in_progress ) ) {
didnt_flush_entry = FALSE;
@@ -8898,14 +10219,29 @@ H5C_make_space_in_cache(H5F_t * f,
}
#endif /* H5C_COLLECT_CACHE_STATS */
+ /* reset entries_removed_counter and
+ * last_entry_removed_ptr prior to the call to
+ * H5C_flush_single_entry() so that we can spot
+ * unexpected removals of entries from the cache,
+ * and set the restart_scan flag if proceeding
+ * would be likely to cause us to scan an entry
+ * that is no longer in the cache.
+ */
+ cache_ptr->entries_removed_counter = 0;
+ cache_ptr->last_entry_removed_ptr = NULL;
+
result = H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- entry_ptr->type,
+ dxpl_id,
entry_ptr->addr,
H5C__NO_FLAGS_SET,
- first_flush_ptr,
- FALSE);
+ FALSE,
+ NULL);
+
+ if ( ( cache_ptr->entries_removed_counter > 1 ) ||
+ ( cache_ptr->last_entry_removed_ptr == prev_ptr ) )
+
+ restart_scan = TRUE;
+
} else if ( (cache_ptr->index_size + space_needed)
>
cache_ptr->max_cache_size ) {
@@ -8914,13 +10250,12 @@ H5C_make_space_in_cache(H5F_t * f,
#endif /* H5C_COLLECT_CACHE_STATS */
result = H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- entry_ptr->type,
+ dxpl_id,
entry_ptr->addr,
H5C__FLUSH_INVALIDATE_FLAG,
- first_flush_ptr,
- TRUE);
+ TRUE,
+ NULL);
+
} else {
/* We have enough space so don't flush clean entry.
@@ -8941,7 +10276,8 @@ H5C_make_space_in_cache(H5F_t * f,
} else {
- /* Skip epoch markers. Set result to SUCCEED to avoid
+ /* Skip epoch markers and entries that are in the process
+ * of being flushed. Set result to SUCCEED to avoid
* triggering the error code below.
*/
didnt_flush_entry = TRUE;
@@ -8955,26 +10291,20 @@ H5C_make_space_in_cache(H5F_t * f,
}
if ( prev_ptr != NULL ) {
-#ifndef NDEBUG
- if ( prev_ptr->magic != H5C__H5C_CACHE_ENTRY_T_MAGIC ) {
-
- /* something horrible has happened to *prev_ptr --
- * scream and die.
- */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "*prev_ptr corrupt 1")
- }
-#endif /* NDEBUG */
if ( didnt_flush_entry ) {
- /* epoch markers don't get flushed, so the sanity checks
- * on normal entries will fail -- thus just set entry_ptr
- * to prev_ptr and go on.
+ /* epoch markers don't get flushed, and we don't touch
+ * entries that are in the process of being flushed.
+ * Hence no need for sanity checks, as we haven't
+ * flushed anything. Thus just set entry_ptr to prev_ptr
+ * and go on.
*/
entry_ptr = prev_ptr;
- } else if ( ( prev_ptr->is_dirty != prev_is_dirty )
+ } else if ( ( restart_scan )
+ ||
+ ( prev_ptr->is_dirty != prev_is_dirty )
||
( prev_ptr->next != next_ptr )
||
@@ -8985,7 +10315,9 @@ H5C_make_space_in_cache(H5F_t * f,
/* something has happened to the LRU -- start over
* from the tail.
*/
+ restart_scan = FALSE;
entry_ptr = cache_ptr->LRU_tail_ptr;
+ H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
} else {
@@ -9073,13 +10405,11 @@ H5C_make_space_in_cache(H5F_t * f,
prev_ptr = entry_ptr->aux_prev;
result = H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- entry_ptr->type,
+ dxpl_id,
entry_ptr->addr,
H5C__FLUSH_INVALIDATE_FLAG,
- first_flush_ptr,
- TRUE);
+ TRUE,
+ NULL);
if ( result < 0 ) {
@@ -9087,6 +10417,12 @@ H5C_make_space_in_cache(H5F_t * f,
"unable to flush entry")
}
+ /* we are scanning the clean LRU, so the serialize function
+ * will not be called on any entry -- thus there is no
+ * concern about the list being modified out from under
+ * this function.
+ */
+
entry_ptr = prev_ptr;
entries_examined++;
}
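Because the scan-restart logic described in the header comment above is spread across several hunks, a condensed sketch of the pattern may help. It is a simplification, not a drop-in replacement: epoch markers, entries with flush_in_progress set, statistics, and most error handling are omitted, "space_still_needed" stands in for the loop conditions used in H5C_make_space_in_cache(), and f, dxpl_id and cache_ptr are the enclosing function's parameters and locals.

/* Condensed sketch of the LRU scan-restart pattern used above. */
entry_ptr = cache_ptr->LRU_tail_ptr;

while ( space_still_needed && ( entry_ptr != NULL ) ) {

    H5C_cache_entry_t * prev_ptr = entry_ptr->prev;

    /* arm the removal detectors before flushing */
    cache_ptr->entries_removed_counter = 0;
    cache_ptr->last_entry_removed_ptr  = NULL;

    if ( H5C_flush_single_entry(f, dxpl_id, entry_ptr->addr,
                                H5C__NO_FLAGS_SET, FALSE, NULL) < 0 )
        break;                  /* the real code raises an error here */

    /* If the flush removed more than the flushed entry itself, or
     * removed the entry we were about to visit next, restart the scan
     * from the LRU tail instead of following a possibly stale prev_ptr.
     */
    if ( ( cache_ptr->entries_removed_counter > 1 ) ||
         ( cache_ptr->last_entry_removed_ptr == prev_ptr ) )
        entry_ptr = cache_ptr->LRU_tail_ptr;
    else
        entry_ptr = prev_ptr;
}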
@@ -9499,6 +10835,66 @@ done:
/*-------------------------------------------------------------------------
*
+ * Function: H5C_entry_in_skip_list
+ *
+ * Purpose: Debugging function that scans the skip list to see if
+ * the target entry is present. We need this, as it is possible for
+ * an entry to be in the skip list twice.
+ *
+ * Return: FALSE if the entry is not in the skip list, and TRUE
+ * if it is.
+ *
+ * Programmer: John Mainzer, 11/1/14
+ *
+ * Changes:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+#if H5C_DO_SLIST_SANITY_CHECKS
+
+static hbool_t
+H5C_entry_in_skip_list(H5C_t * cache_ptr, H5C_cache_entry_t *target_ptr)
+{
+ hbool_t in_slist = FALSE;
+ H5SL_node_t * node_ptr = NULL;
+ H5C_cache_entry_t * entry_ptr = NULL;
+
+ HDassert( cache_ptr );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ HDassert( cache_ptr->slist_ptr );
+
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
+
+ while ( ( node_ptr != NULL ) && ( ! in_slist ) )
+ {
+ entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+
+ HDassert( entry_ptr );
+ HDassert( entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
+ HDassert( entry_ptr->is_dirty );
+ HDassert( entry_ptr->in_slist );
+
+ if ( entry_ptr == target_ptr ) {
+
+ in_slist = TRUE;
+
+ } else {
+
+ node_ptr = H5SL_next(node_ptr);
+ }
+ }
+
+ return(in_slist);
+
+} /* H5C_entry_in_skip_list() */
+
+#endif /* H5C_DO_SLIST_SANITY_CHECKS */
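H5C_entry_in_skip_list() is only compiled when H5C_DO_SLIST_SANITY_CHECKS is defined, so any caller is expected to guard its use the same way. A hypothetical usage, not taken from this patch, would be an assertion before inserting an entry into the slist:

#if H5C_DO_SLIST_SANITY_CHECKS
    /* hypothetical check: entry_ptr must not already be in the slist */
    HDassert( ! H5C_entry_in_skip_list(cache_ptr, entry_ptr) );
#endif /* H5C_DO_SLIST_SANITY_CHECKS */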
+
+
+/*-------------------------------------------------------------------------
+ *
* Function: H5C_get_entry_ptr_from_addr()
*
* Purpose: Debugging function that attempts to look up an entry in the
@@ -9801,7 +11197,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5C_flush_tagged_entries(H5F_t * f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, H5C_t * cache_ptr, haddr_t tag)
+H5C_flush_tagged_entries(H5F_t * f, hid_t dxpl_id, H5C_t * cache_ptr, haddr_t tag)
{
herr_t ret_value = SUCCEED;
@@ -9817,7 +11213,7 @@ H5C_flush_tagged_entries(H5F_t * f, hid_t primary_dxpl_id, hid_t secondary_dxpl_
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't mark tagged entries")
/* Flush all marked entries */
- if(H5C_flush_marked_entries(f, primary_dxpl_id, secondary_dxpl_id, cache_ptr) < 0)
+ if(H5C_flush_marked_entries(f, dxpl_id, cache_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush marked entries")
done:
@@ -9890,7 +11286,7 @@ H5C_mark_tagged_entries(H5C_t * cache_ptr, haddr_t tag)
*-------------------------------------------------------------------------
*/
static herr_t
-H5C_flush_marked_entries(H5F_t * f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, H5C_t * cache_ptr)
+H5C_flush_marked_entries(H5F_t * f, hid_t dxpl_id, H5C_t * cache_ptr)
{
herr_t ret_value = SUCCEED;
@@ -9902,8 +11298,7 @@ H5C_flush_marked_entries(H5F_t * f, hid_t primary_dxpl_id, hid_t secondary_dxpl_
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
/* Flush all marked entries */
- if(H5C_flush_cache(f, primary_dxpl_id, secondary_dxpl_id,
- H5C__FLUSH_MARKED_ENTRIES_FLAG | H5C__FLUSH_IGNORE_PROTECTED_FLAG) < 0)
+ if(H5C_flush_cache(f, dxpl_id, H5C__FLUSH_MARKED_ENTRIES_FLAG | H5C__FLUSH_IGNORE_PROTECTED_FLAG) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache")
done:
@@ -9946,7 +11341,7 @@ H5C_verify_tag(int id, haddr_t tag)
* constraints are met. */
/* Superblock */
- if(id == H5AC_SUPERBLOCK_ID) {
+ if((id == H5AC_SUPERBLOCK_ID) || (id == H5AC_DRVRINFO_ID)) {
if(tag != H5AC__SUPERBLOCK_TAG)
HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "superblock not tagged with H5AC__SUPERBLOCK_TAG")
}
@@ -10018,10 +11413,9 @@ H5C_retag_copied_metadata(H5C_t * cache_ptr, haddr_t metadata_tag)
next_entry_ptr = cache_ptr->index[u];
while(next_entry_ptr != NULL) {
- if(cache_ptr->index[u] != NULL) {
+ if(cache_ptr->index[u] != NULL)
if((cache_ptr->index[u])->tag == H5AC__COPIED_TAG)
(cache_ptr->index[u])->tag = metadata_tag;
- } /* end if */
next_entry_ptr = next_entry_ptr->ht_next;
} /* end while */
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index 7c278e8..988dfff 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -35,14 +35,16 @@
#ifndef _H5Cpkg_H
#define _H5Cpkg_H
-
/* Get package's private header */
#include "H5Cprivate.h"
-
-/* Get needed headers */
+/* Other private headers needed by this file */
#include "H5SLprivate.h" /* Skip lists */
+/**************************/
+/* Package Private Macros */
+/**************************/
+
/* With the introduction of the fractal heap, it is now possible for
* entries to be dirtied, resized, and/or moved in the flush callbacks.
* As a result, on flushes, it may be necessary to make multiple passes
@@ -53,980 +55,14 @@
*
* -- JRM
*/
-
#define H5C__MAX_PASSES_ON_FLUSH 4
-
-
-/****************************************************************************
- *
- * structure H5C_t
- *
- * Catchall structure for all variables specific to an instance of the cache.
- *
- * While the individual fields of the structure are discussed below, the
- * following overview may be helpful.
- *
- * Entries in the cache are stored in an instance of H5TB_TREE, indexed on
- * the entry's disk address. While the H5TB_TREE is less efficient than
- * hash table, it keeps the entries in address sorted order. As flushes
- * in parallel mode are more efficient if they are issued in increasing
- * address order, this is a significant benefit. Also the H5TB_TREE code
- * was readily available, which reduced development time.
- *
- * While the cache was designed with multiple replacement policies in mind,
- * at present only a modified form of LRU is supported.
- *
- * JRM - 4/26/04
- *
- * Profiling has indicated that searches in the instance of H5TB_TREE are
- * too expensive. To deal with this issue, I have augmented the cache
- * with a hash table in which all entries will be stored. Given the
- * advantages of flushing entries in increasing address order, the TBBT
- * is retained, but only dirty entries are stored in it. At least for
- * now, we will leave entries in the TBBT after they are flushed.
- *
- * Note that index_size and index_len now refer to the total size of
- * and number of entries in the hash table.
- *
- * JRM - 7/19/04
- *
- * The TBBT has since been replaced with a skip list. This change
- * greatly predates this note.
- *
- * JRM - 9/26/05
- *
- * magic: Unsigned 32 bit integer always set to H5C__H5C_T_MAGIC.
- * This field is used to validate pointers to instances of
- * H5C_t.
- *
- * flush_in_progress: Boolean flag indicating whether a flush is in
- * progress.
- *
- * trace_file_ptr: File pointer pointing to the trace file, which is used
- * to record cache operations for use in simulations and design
- * studies. This field will usually be NULL, indicating that
- * no trace file should be recorded.
- *
- * Since much of the code supporting the parallel metadata
- * cache is in H5AC, we don't write the trace file from
- * H5C. Instead, H5AC reads the trace_file_ptr as needed.
- *
- * When we get to using H5C in other places, we may add
- * code to write trace file data at the H5C level as well.
- *
- * aux_ptr: Pointer to void used to allow wrapper code to associate
- * its data with an instance of H5C_t. The H5C cache code
- * sets this field to NULL, and otherwise leaves it alone.
- *
- * max_type_id: Integer field containing the maximum type id number assigned
- * to a type of entry in the cache. All type ids from 0 to
- * max_type_id inclusive must be defined. The names of the
- * types are stored in the type_name_table discussed below, and
- * indexed by the ids.
- *
- * type_name_table_ptr: Pointer to an array of pointer to char of length
- * max_type_id + 1. The strings pointed to by the entries
- * in the array are the names of the entry types associated
- * with the indexing type IDs.
- *
- * max_cache_size: Nominal maximum number of bytes that may be stored in the
- * cache. This value should be viewed as a soft limit, as the
- * cache can exceed this value under the following circumstances:
- *
- * a) All entries in the cache are protected, and the cache is
- * asked to insert a new entry. In this case the new entry
- * will be created. If this causes the cache to exceed
- * max_cache_size, it will do so. The cache will attempt
- * to reduce its size as entries are unprotected.
- *
- * b) When running in parallel mode, the cache may not be
- * permitted to flush a dirty entry in response to a read.
- * If there are no clean entries available to evict, the
- * cache will exceed its maximum size. Again the cache
- * will attempt to reduce its size to the max_cache_size
- * limit on the next cache write.
- *
- * c) When an entry increases in size, the cache may exceed
- * the max_cache_size limit until the next time the cache
- * attempts to load or insert an entry.
- *
- * min_clean_size: Nominal minimum number of clean bytes in the cache.
- * The cache attempts to maintain this number of bytes of
- * clean data so as to avoid case b) above. Again, this is
- * a soft limit.
- *
- *
- * In addition to the call back functions required for each entry, the
- * cache requires the following call back functions for this instance of
- * the cache as a whole:
- *
- * check_write_permitted: In certain applications, the cache may not
- * be allowed to write to disk at certain time. If specified,
- * the check_write_permitted function is used to determine if
- * a write is permissible at any given point in time.
- *
- * If no such function is specified (i.e. this field is NULL),
- * the cache uses the following write_permitted field to
- * determine whether writes are permitted.
- *
- * write_permitted: If check_write_permitted is NULL, this boolean flag
- * indicates whether writes are permitted.
- *
- * log_flush: If provided, this function is called whenever a dirty
- * entry is flushed to disk.
- *
- *
- * In cases where memory is plentiful, and performance is an issue, it may
- * be useful to disable all cache evictions, and thereby postpone metadata
- * writes. The following field is used to implement this.
- *
- * evictions_enabled: Boolean flag that is initialized to TRUE. When
- * this flag is set to FALSE, the metadata cache will not
- * attempt to evict entries to make space for newly protected
- * entries, and instead the will grow without limit.
- *
- * Needless to say, this feature must be used with care.
- *
- *
- * The cache requires an index to facilitate searching for entries. The
- * following fields support that index.
- *
- * index_len: Number of entries currently in the hash table used to index
- * the cache.
- *
- * index_size: Number of bytes of cache entries currently stored in the
- * hash table used to index the cache.
- *
- * This value should not be mistaken for footprint of the
- * cache in memory. The average cache entry is small, and
- * the cache has a considerable overhead. Multiplying the
- * index_size by two should yield a conservative estimate
- * of the cache's memory footprint.
- *
- * clean_index_size: Number of bytes of clean entries currently stored in
- * the hash table. Note that the index_size field (above)
- * is also the sum of the sizes of all entries in the cache.
- * Thus we should have the invarient that clean_index_size +
- * dirty_index_size == index_size.
- *
- * WARNING:
- *
- * 1) The clean_index_size field is not maintained by the
- * index macros, as the hash table doesn't care whether
- * the entry is clean or dirty. Instead the field is
- * maintained in the H5C__UPDATE_RP macros.
- *
- * 2) The value of the clean_index_size must not be mistaken
- * for the current clean size of the cache. Rather, the
- * clean size of the cache is the current value of
- * clean_index_size plus the amount of empty space (if any)
- * in the cache.
- *
- * dirty_index_size: Number of bytes of dirty entries currently stored in
- * the hash table. Note that the index_size field (above)
- * is also the sum of the sizes of all entries in the cache.
- * Thus we should have the invarient that clean_index_size +
- * dirty_index_size == index_size.
- *
- * WARNING:
- *
- * 1) The dirty_index_size field is not maintained by the
- * index macros, as the hash table doesn't care whether
- * the entry is clean or dirty. Instead the field is
- * maintained in the H5C__UPDATE_RP macros.
- *
- * index: Array of pointer to H5C_cache_entry_t of size
- * H5C__HASH_TABLE_LEN. At present, this value is a power
- * of two, not the usual prime number.
- *
- * I hope that the variable size of cache elements, the large
- * hash table size, and the way in which HDF5 allocates space
- * will combine to avoid problems with periodicity. If so, we
- * can use a trivial hash function (a bit-and and a 3 bit left
- * shift) with some small savings.
- *
- * If not, it will become evident in the statistics. Changing
- * to the usual prime number length hash table will require
- * changing the H5C__HASH_FCN macro and the deletion of the
- * H5C__HASH_MASK #define. No other changes should be required.
- *
- *
- * When we flush the cache, we need to write entries out in increasing
- * address order. An instance of a skip list is used to store dirty entries in
- * sorted order. Whether it is cheaper to sort the dirty entries as needed,
- * or to maintain the list is an open question. At a guess, it depends
- * on how frequently the cache is flushed. We will see how it goes.
- *
- * For now at least, I will not remove dirty entries from the list as they
- * are flushed. (this has been changed -- dirty entries are now removed from
- * the skip list as they are flushed. JRM - 10/25/05)
- *
- * slist_len: Number of entries currently in the skip list
- * used to maintain a sorted list of dirty entries in the
- * cache.
- *
- * slist_size: Number of bytes of cache entries currently stored in the
- * skip list used to maintain a sorted list of
- * dirty entries in the cache.
- *
- * slist_ptr: pointer to the instance of H5SL_t used maintain a sorted
- * list of dirty entries in the cache. This sorted list has
- * two uses:
- *
- * a) It allows us to flush dirty entries in increasing address
- * order, which results in significant savings.
- *
- * b) It facilitates checking for adjacent dirty entries when
- * attempting to evict entries from the cache. While we
- * don't use this at present, I hope that this will allow
- * some optimizations when I get to it.
- *
- * num_last_entries: The number of entries in the cache that can only be
- * flushed after all other entries in the cache have
- * been flushed. At this time, this will only ever be
- * one entry (the superblock), and the code has been
- * protected with HDasserts to enforce this. This restraint
- * can certainly be relaxed in the future if the need for
- * multiple entries being flushed last arises, though
- * explicit tests for that case should be added when said
- * HDasserts are removed.
- *
- * With the addition of the fractal heap, the cache must now deal with
- * the case in which entries may be dirtied, moved, or have their sizes
- * changed during a flush. To allow sanity checks in this situation, the
- * following two fields have been added. They are only compiled in when
- * H5C_DO_SANITY_CHECKS is TRUE.
- *
- * slist_len_increase: Number of entries that have been added to the
- * slist since the last time this field was set to zero.
- *
- * slist_size_increase: Total size of all entries that have been added
- * to the slist since the last time this field was set to
- * zero.
- *
- *
- * When a cache entry is protected, it must be removed from the LRU
- * list(s) as it cannot be either flushed or evicted until it is unprotected.
- * The following fields are used to implement the protected list (pl).
- *
- * pl_len: Number of entries currently residing on the protected list.
- *
- * pl_size: Number of bytes of cache entries currently residing on the
- * protected list.
- *
- * pl_head_ptr: Pointer to the head of the doubly linked list of protected
- * entries. Note that cache entries on this list are linked
- * by their next and prev fields.
- *
- * This field is NULL if the list is empty.
- *
- * pl_tail_ptr: Pointer to the tail of the doubly linked list of protected
- * entries. Note that cache entries on this list are linked
- * by their next and prev fields.
- *
- * This field is NULL if the list is empty.
- *
- *
- * For very frequently used entries, the protect/unprotect overhead can
- * become burdensome. To avoid this overhead, I have modified the cache
- * to allow entries to be "pinned". A pinned entry is similar to a
- * protected entry, in the sense that it cannot be evicted, and that
- * the entry can be modified at any time.
- *
- * Pinning an entry has the following implications:
- *
- * 1) A pinned entry cannot be evicted. Thus unprotected
- * pinned entries reside in the pinned entry list, instead
- * of the LRU list(s) (or other lists maintained by the current
- * replacement policy code).
- *
- * 2) A pinned entry can be accessed or modified at any time.
- * Therefore, the cache must check with the entry owner
- * before flushing it. If permission is denied, the
- * cache just skips the entry in the flush.
- *
- * 3) A pinned entry can be marked as dirty (and possibly
- * change size) while it is unprotected.
- *
- * 4) The flush-destroy code must allow pinned entries to
- * be unpinned (and possibly unprotected) during the
- * flush.
- *
- * Since pinned entries cannot be evicted, they must be kept on a pinned
- * entry list (pel), instead of being entrusted to the replacement policy
- * code.
- *
- * Maintaining the pinned entry list requires the following fields:
- *
- * pel_len: Number of entries currently residing on the pinned
- * entry list.
- *
- * pel_size: Number of bytes of cache entries currently residing on
- * the pinned entry list.
- *
- * pel_head_ptr: Pointer to the head of the doubly linked list of pinned
- * but not protected entries. Note that cache entries on
- * this list are linked by their next and prev fields.
- *
- * This field is NULL if the list is empty.
- *
- * pel_tail_ptr: Pointer to the tail of the doubly linked list of pinned
- * but not protected entries. Note that cache entries on
- * this list are linked by their next and prev fields.
- *
- * This field is NULL if the list is empty.
- *
- *
- * The cache must have a replacement policy, and the fields supporting this
- * policy must be accessible from this structure.
- *
- * While there has been interest in several replacement policies for
- * this cache, the initial development schedule is tight. Thus I have
- * elected to support only a modified LRU (least recently used) policy
- * for the first cut.
- *
- * To further simplify matters, I have simply included the fields needed
- * by the modified LRU in this structure. When and if we add support for
- * other policies, it will probably be easiest to just add the necessary
- * fields to this structure as well -- we only create one instance of this
- * structure per file, so the overhead is not excessive.
- *
- *
- * Fields supporting the modified LRU policy:
- *
- * See most any OS text for a discussion of the LRU replacement policy.
- *
- * When operating in parallel mode, we must ensure that a read does not
- * cause a write. If it does, the process will hang, as the write will
- * be collective and the other processes will not know to participate.
- *
- * To deal with this issue, I have modified the usual LRU policy by adding
- * clean and dirty LRU lists to the usual LRU list.
- *
- * The clean LRU list is simply the regular LRU list with all dirty cache
- * entries removed.
- *
- * Similarly, the dirty LRU list is the regular LRU list with all the clean
- * cache entries removed.
- *
- * When reading in parallel mode, we evict from the clean LRU list only.
- * This implies that we must try to ensure that the clean LRU list is
- * reasonably well stocked at all times.
- *
- * We attempt to do this by trying to flush enough entries on each write
- * to keep the cLRU_list_size >= min_clean_size.
- *
- * Even if we start with a completely clean cache, a sequence of protects
- * without unprotects can empty the clean LRU list. In this case, the
- * cache must grow temporarily. At the next write, we will attempt to
- * evict enough entries to reduce index_size to less than max_cache_size.
- * While this will usually be possible, all bets are off if enough entries
- * are protected.
- *
- * Discussions of the individual fields used by the modified LRU replacement
- * policy follow:
- *
- * LRU_list_len: Number of cache entries currently on the LRU list.
- *
- * Observe that LRU_list_len + pl_len must always equal
- * index_len.
- *
- * LRU_list_size: Number of bytes of cache entries currently residing on the
- * LRU list.
- *
- * Observe that LRU_list_size + pl_size must always equal
- * index_size.
- *
- * LRU_head_ptr: Pointer to the head of the doubly linked LRU list. Cache
- * entries on this list are linked by their next and prev fields.
- *
- * This field is NULL if the list is empty.
- *
- * LRU_tail_ptr: Pointer to the tail of the doubly linked LRU list. Cache
- * entries on this list are linked by their next and prev fields.
- *
- * This field is NULL if the list is empty.
- *
- * cLRU_list_len: Number of cache entries currently on the clean LRU list.
- *
- * Observe that cLRU_list_len + dLRU_list_len must always
- * equal LRU_list_len.
- *
- * cLRU_list_size: Number of bytes of cache entries currently residing on
- * the clean LRU list.
- *
- * Observe that cLRU_list_size + dLRU_list_size must always
- * equal LRU_list_size.
- *
- * cLRU_head_ptr: Pointer to the head of the doubly linked clean LRU list.
- * Cache entries on this list are linked by their aux_next and
- * aux_prev fields.
- *
- * This field is NULL if the list is empty.
- *
- * cLRU_tail_ptr: Pointer to the tail of the doubly linked clean LRU list.
- * Cache entries on this list are linked by their aux_next and
- * aux_prev fields.
- *
- * This field is NULL if the list is empty.
- *
- * dLRU_list_len: Number of cache entries currently on the dirty LRU list.
- *
- * Observe that cLRU_list_len + dLRU_list_len must always
- * equal LRU_list_len.
- *
- * dLRU_list_size: Number of cache entries currently on the dirty LRU list.
- *
- * Observe that cLRU_list_len + dLRU_list_len must always
- * equal LRU_list_len.
- *
- * dLRU_head_ptr: Pointer to the head of the doubly linked dirty LRU list.
- * Cache entries on this list are linked by their aux_next and
- * aux_prev fields.
- *
- * This field is NULL if the list is empty.
- *
- * dLRU_tail_ptr: Pointer to the tail of the doubly linked dirty LRU list.
- * Cache entries on this list are linked by their aux_next and
- * aux_prev fields.
- *
- * This field is NULL if the list is empty.
- *
- *
- * Automatic cache size adjustment:
- *
- * While the default cache size is adequate for most cases, we can run into
- * cases where the default is too small. Ideally, we will let the user
- * adjust the cache size as required. However, this is not possible in all
- * cases. Thus I have added automatic cache size adjustment code.
- *
- * The configuration for the automatic cache size adjustment is stored in
- * the structure described below:
- *
- * size_increase_possible: Depending on the configuration data given
- * in the resize_ctl field, it may or may not be possible
- * to increase the size of the cache. Rather than test for
- * all the ways this can happen, we simply set this flag when
- * we receive a new configuration.
- *
- * flash_size_increase_possible: Depending on the configuration data given
- * in the resize_ctl field, it may or may not be possible
- * for a flash size increase to occur. We set this flag
- * whenever we receive a new configuration so as to avoid
- * repeated calculations.
- *
- * flash_size_increase_threshold: If a flash cache size increase is possible,
- * this field is used to store the minimum size of a new entry
- * or size increase needed to trigger a flash cache size
- * increase. Note that this field must be updated whenever
- * the size of the cache is changed.
- *
- * size_decrease_possible: Depending on the configuration data given
- * in the resize_ctl field, it may or may not be possible
- * to decrease the size of the cache. Rather than test for
- * all the ways this can happen, we simply set this flag when
- * we receive a new configuration.
- *
- * cache_full: Boolean flag used to keep track of whether the cache is
- * full, so we can refrain from increasing the size of a
- * cache which hasn't used up the space allotted to it.
- *
- * The field is initialized to FALSE, and then set to TRUE
- * whenever we attempt to make space in the cache.
- *
- * resize_enabled: This is another convenience flag which is set whenever
- * a new set of values for resize_ctl are provided. Very
- * simply,
- *
- * resize_enabled = size_increase_possible ||
- * size_decrease_possible;
- *
- * size_decreased: Boolean flag set to TRUE whenever the maximum cache
- * size is decreased. The flag triggers a call to
- * H5C_make_space_in_cache() on the next call to H5C_protect().
- *
- * resize_ctl: Instance of H5C_auto_size_ctl_t containing configuration
- * data for automatic cache resizing.
- *
- * epoch_markers_active: Integer field containing the number of epoch
- * markers currently in use in the LRU list. This value
- * must be in the range [0, H5C__MAX_EPOCH_MARKERS - 1].
- *
- * epoch_marker_active: Array of boolean of length H5C__MAX_EPOCH_MARKERS.
- * This array is used to track which epoch markers are currently
- * in use.
- *
- * epoch_marker_ringbuf: Array of int of length H5C__MAX_EPOCH_MARKERS + 1.
- *
- * To manage the epoch marker cache entries, it is necessary
- * to track their order in the LRU list. This is done with
- * epoch_marker_ringbuf. When markers are inserted at the
- * head of the LRU list, the index of the marker in the
- * epoch_markers array is inserted at the tail of the ring
- * buffer. When it becomes the epoch_marker_active'th marker
- * in the LRU list, it will have worked its way to the head
- * of the ring buffer as well. This allows us to remove it
- * without scanning the LRU list if such is required.
- *
- * epoch_marker_ringbuf_first: Integer field containing the index of the
- * first entry in the ring buffer.
- *
- * epoch_marker_ringbuf_last: Integer field containing the index of the
- * last entry in the ring buffer.
- *
- * epoch_marker_ringbuf_size: Integer field containing the number of entries
- * in the ring buffer.
- *
- * epoch_markers: Array of instances of H5C_cache_entry_t of length
- * H5C__MAX_EPOCH_MARKERS. The entries are used as markers
- * in the LRU list to identify cache entries that haven't
- * been accessed for some (small) specified number of
- * epochs. These entries (if any) can then be evicted and
- * the cache size reduced -- ideally without evicting any
- * of the current working set. Needless to say, the epoch
- *              length and the number of epochs before an unused entry
- *              is evicted must be chosen so that all, or almost all, of
- *              the working set will be accessed before the limit is reached.
- *
- * Epoch markers only appear in the LRU list, never in
- * the index or slist. While they are of type
- * H5C__EPOCH_MARKER_TYPE, and have associated class
- * functions, these functions should never be called.
- *
- * The addr fields of these instances of H5C_cache_entry_t
- * are set to the index of the instance in the epoch_markers
- * array, the size is set to 0, and the type field points
- * to the constant structure epoch_marker_class defined
- * in H5C.c. The next and prev fields are used as usual
- * to link the entry into the LRU list.
- *
- * All other fields are unused.
- *
- *
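To make the ring-buffer bookkeeping described above concrete, the following is a minimal hedged sketch in C. The type, constant, and function names are invented for illustration; the real cache folds this logic into its resize code rather than exposing helpers like these. Marker indices enter at the tail of the ring buffer as markers are placed at the head of the LRU, and leave from the ring buffer's head once the corresponding marker has aged to the back of the LRU.

    #include <stdint.h>

    #define SKETCH_MAX_EPOCH_MARKERS 10            /* illustrative value only */

    typedef struct marker_ringbuf_t {
        int32_t buf[SKETCH_MAX_EPOCH_MARKERS + 1];
        int32_t first;                             /* index of the oldest slot */
        int32_t last;                              /* index of the newest slot */
        int32_t size;                              /* number of slots in use   */
    } marker_ringbuf_t;

    /* Record that epoch marker 'marker_idx' was just inserted at the head of
     * the LRU list: it becomes the newest element of the ring buffer.  The
     * caller is assumed to have checked that the buffer is not full.
     */
    static void
    ringbuf_push(marker_ringbuf_t *rb, int32_t marker_idx)
    {
        rb->last = (rb->first + rb->size) % (SKETCH_MAX_EPOCH_MARKERS + 1);
        rb->buf[rb->last] = marker_idx;
        rb->size++;
    }

    /* Remove and return the oldest marker index -- the marker that has worked
     * its way furthest down the LRU list -- without scanning the LRU itself.
     */
    static int32_t
    ringbuf_pop(marker_ringbuf_t *rb)
    {
        int32_t marker_idx = rb->buf[rb->first];

        rb->first = (rb->first + 1) % (SKETCH_MAX_EPOCH_MARKERS + 1);
        rb->size--;
        return marker_idx;
    }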
- * Cache hit rate collection fields:
- *
- * We supply the current cache hit rate on request, so we must keep a
- * simple cache hit rate computation regardless of whether statistics
- * collection is enabled. The following fields support this capability.
- *
- * cache_hits: Number of cache hits since the last time the cache hit
- * rate statistics were reset. Note that when automatic cache
- * re-sizing is enabled, this field will be reset every automatic
- * resize epoch.
- *
- * cache_accesses: Number of times the cache has been accessed since
- *              the last time the cache hit rate statistics were reset.
- *              Note that when automatic cache re-sizing is enabled,
- *              this field will be reset every automatic resize epoch.
- *
- *
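As a concrete reading of the two counters above, the reported hit rate is simply hits divided by total accesses. The sketch below shows only that arithmetic; the function name is an invention, not the cache's API, and it returns 0.0 when there have been no accesses since the last reset.

    #include <stdint.h>

    /* Illustrative hit-rate computation from the counters described above. */
    static double
    sketch_hit_rate(int64_t cache_hits, int64_t cache_accesses)
    {
        if (cache_accesses <= 0)
            return 0.0;
        return (double)cache_hits / (double)cache_accesses;
    }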
- * Statistics collection fields:
- *
- * When enabled, these fields are used to collect statistics as described
- * below. The first set are collected only when H5C_COLLECT_CACHE_STATS
- * is true.
- *
- * hits: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type id
- * equal to the array index has been in cache when requested in
- * the current epoch.
- *
- * misses: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type id
- * equal to the array index has not been in cache when
- * requested in the current epoch.
- *
- * write_protects: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
- * cells are used to record the number of times an entry with
- * type id equal to the array index has been write protected
- * in the current epoch.
- *
- * Observe that (hits + misses) = (write_protects + read_protects).
- *
- * read_protects: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
- * cells are used to record the number of times an entry with
- * type id equal to the array index has been read protected in
- * the current epoch.
- *
- * Observe that (hits + misses) = (write_protects + read_protects).
- *
- * max_read_protects: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1.
- *              The cells are used to record the maximum number of
- *              simultaneous read protects on any entry with type id
- *              equal to the array index in the current epoch.
- *
- * insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type
- * id equal to the array index has been inserted into the
- * cache in the current epoch.
- *
- * pinned_insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
- * The cells are used to record the number of times an entry
- * with type id equal to the array index has been inserted
- * pinned into the cache in the current epoch.
- *
- * clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times a dirty entry with type
- * id equal to the array index has been cleared in the current
- * epoch.
- *
- * flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type id
- * equal to the array index has been written to disk in the
- * current epoch.
- *
- * evictions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type id
- * equal to the array index has been evicted from the cache in
- * the current epoch.
- *
- * moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type
- * id equal to the array index has been moved in the current
- * epoch.
- *
- * entry_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
- * The cells are used to record the number of times an entry
- * with type id equal to the array index has been moved
- * during its flush callback in the current epoch.
- *
- * cache_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
- * The cells are used to record the number of times an entry
- * with type id equal to the array index has been moved
- * during a cache flush in the current epoch.
- *
- * pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type
- * id equal to the array index has been pinned in the current
- * epoch.
- *
- * unpins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type
- * id equal to the array index has been unpinned in the current
- * epoch.
- *
- * dirty_pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type
- * id equal to the array index has been marked dirty while pinned
- * in the current epoch.
- *
- * pinned_flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
- * cells are used to record the number of times an entry
- * with type id equal to the array index has been flushed while
- * pinned in the current epoch.
- *
- * pinned_clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
- * cells are used to record the number of times an entry
- * with type id equal to the array index has been cleared while
- * pinned in the current epoch.
- *
- * size_increases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
- * The cells are used to record the number of times an entry
- * with type id equal to the array index has increased in
- * size in the current epoch.
- *
- * size_decreases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
- * The cells are used to record the number of times an entry
- * with type id equal to the array index has decreased in
- * size in the current epoch.
- *
- * entry_flush_size_changes: Array of int64 of length
- * H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record
- * the number of times an entry with type id equal to the
- * array index has changed size while in its flush callback.
- *
- * cache_flush_size_changes: Array of int64 of length
- * H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record
- * the number of times an entry with type id equal to the
- * array index has changed size during a cache flush
- *
- * total_ht_insertions: Number of times entries have been inserted into the
- * hash table in the current epoch.
- *
- * total_ht_deletions: Number of times entries have been deleted from the
- * hash table in the current epoch.
- *
- * successful_ht_searches: int64 containing the total number of successful
- * searches of the hash table in the current epoch.
- *
- * total_successful_ht_search_depth: int64 containing the total number of
- * entries other than the targets examined in successful
- * searches of the hash table in the current epoch.
- *
- * failed_ht_searches: int64 containing the total number of unsuccessful
- * searches of the hash table in the current epoch.
- *
- * total_failed_ht_search_depth: int64 containing the total number of
- * entries examined in unsuccessful searches of the hash
- * table in the current epoch.
- *
- * max_index_len: Largest value attained by the index_len field in the
- * current epoch.
- *
- * max_index_size: Largest value attained by the index_size field in the
- * current epoch.
- *
- * max_clean_index_size: Largest value attained by the clean_index_size field
- * in the current epoch.
- *
- * max_dirty_index_size: Largest value attained by the dirty_index_size field
- * in the current epoch.
- *
- * max_slist_len: Largest value attained by the slist_len field in the
- * current epoch.
- *
- * max_slist_size: Largest value attained by the slist_size field in the
- * current epoch.
- *
- * max_pl_len: Largest value attained by the pl_len field in the
- * current epoch.
- *
- * max_pl_size: Largest value attained by the pl_size field in the
- * current epoch.
- *
- * max_pel_len: Largest value attained by the pel_len field in the
- * current epoch.
- *
- * max_pel_size: Largest value attained by the pel_size field in the
- * current epoch.
- *
- * calls_to_msic: Total number of calls to H5C_make_space_in_cache
- *
- * total_entries_skipped_in_msic: Number of clean entries skipped while
- * enforcing the min_clean_fraction in H5C_make_space_in_cache().
- *
- * total_entries_scanned_in_msic: Number of entries scanned while
- *              enforcing the min_clean_fraction in H5C_make_space_in_cache().
- *
- * max_entries_skipped_in_msic: Maximum number of clean entries skipped
- * in any one call to H5C_make_space_in_cache().
- *
- * max_entries_scanned_in_msic: Maximum number of entries scanned over
- * in any one call to H5C_make_space_in_cache().
- *
- * entries_scanned_to_make_space: Number of entries scanned only when looking
- * for entries to evict in order to make space in cache.
- *
- * The remaining stats are collected only when both H5C_COLLECT_CACHE_STATS
- * and H5C_COLLECT_CACHE_ENTRY_STATS are true.
- *
- * max_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the maximum number of times any single
- * entry with type id equal to the array index has been
- * accessed in the current epoch.
- *
- * min_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the minimum number of times any single
- * entry with type id equal to the array index has been
- * accessed in the current epoch.
- *
- * max_clears: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the maximum number of times any single
- * entry with type id equal to the array index has been cleared
- * in the current epoch.
- *
- * max_flushes: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the maximum number of times any single
- * entry with type id equal to the array index has been
- * flushed in the current epoch.
- *
- * max_size: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the maximum size of any single entry
- * with type id equal to the array index that has resided in
- * the cache in the current epoch.
- *
- * max_pins: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the maximum number of times that any single
- * entry with type id equal to the array index that has been
- * marked as pinned in the cache in the current epoch.
- *
- *
- * Fields supporting testing:
- *
- * prefix       Array of char used to prefix debugging output. The
- *              field is intended to allow marking of output with the
- *              process's MPI rank.
- *
- * get_entry_ptr_from_addr_counter: Counter used to track the number of
- * times the H5C_get_entry_ptr_from_addr() function has been
- * called successfully. This field is only defined when
- * NDEBUG is not #defined.
- *
- ****************************************************************************/
-
+/* Cache configuration settings */
#define H5C__HASH_TABLE_LEN (64 * 1024) /* must be a power of 2 */
-
#define H5C__H5C_T_MAGIC 0x005CAC0E
-#define H5C__MAX_NUM_TYPE_IDS 27
+#define H5C__MAX_NUM_TYPE_IDS 28
#define H5C__PREFIX_LEN 32
-struct H5C_t
-{
- uint32_t magic;
-
- hbool_t flush_in_progress;
-
- FILE * trace_file_ptr;
-
- void * aux_ptr;
-
- int32_t max_type_id;
- const char * (* type_name_table_ptr);
-
- size_t max_cache_size;
- size_t min_clean_size;
-
- H5C_write_permitted_func_t check_write_permitted;
- hbool_t write_permitted;
-
- H5C_log_flush_func_t log_flush;
-
- hbool_t evictions_enabled;
-
- int32_t index_len;
- size_t index_size;
- size_t clean_index_size;
- size_t dirty_index_size;
- H5C_cache_entry_t * (index[H5C__HASH_TABLE_LEN]);
-
- hbool_t ignore_tags;
-
- int32_t slist_len;
- size_t slist_size;
- H5SL_t * slist_ptr;
- int32_t num_last_entries;
-#if H5C_DO_SANITY_CHECKS
- int64_t slist_len_increase;
- int64_t slist_size_increase;
-#endif /* H5C_DO_SANITY_CHECKS */
-
- int32_t pl_len;
- size_t pl_size;
- H5C_cache_entry_t * pl_head_ptr;
- H5C_cache_entry_t * pl_tail_ptr;
-
- int32_t pel_len;
- size_t pel_size;
- H5C_cache_entry_t * pel_head_ptr;
- H5C_cache_entry_t * pel_tail_ptr;
-
- int32_t LRU_list_len;
- size_t LRU_list_size;
- H5C_cache_entry_t * LRU_head_ptr;
- H5C_cache_entry_t * LRU_tail_ptr;
-
- int32_t cLRU_list_len;
- size_t cLRU_list_size;
- H5C_cache_entry_t * cLRU_head_ptr;
- H5C_cache_entry_t * cLRU_tail_ptr;
-
- int32_t dLRU_list_len;
- size_t dLRU_list_size;
- H5C_cache_entry_t * dLRU_head_ptr;
- H5C_cache_entry_t * dLRU_tail_ptr;
-
- hbool_t size_increase_possible;
- hbool_t flash_size_increase_possible;
- size_t flash_size_increase_threshold;
- hbool_t size_decrease_possible;
- hbool_t resize_enabled;
- hbool_t cache_full;
- hbool_t size_decreased;
- H5C_auto_size_ctl_t resize_ctl;
-
- int32_t epoch_markers_active;
- hbool_t epoch_marker_active[H5C__MAX_EPOCH_MARKERS];
- int32_t epoch_marker_ringbuf[H5C__MAX_EPOCH_MARKERS+1];
- int32_t epoch_marker_ringbuf_first;
- int32_t epoch_marker_ringbuf_last;
- int32_t epoch_marker_ringbuf_size;
- H5C_cache_entry_t epoch_markers[H5C__MAX_EPOCH_MARKERS];
-
- int64_t cache_hits;
- int64_t cache_accesses;
-
-#if H5C_COLLECT_CACHE_STATS
-
- /* stats fields */
- int64_t hits[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t misses[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t write_protects[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t read_protects[H5C__MAX_NUM_TYPE_IDS + 1];
- int32_t max_read_protects[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t insertions[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t pinned_insertions[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t clears[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t flushes[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t evictions[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t moves[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t entry_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t cache_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t pins[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t unpins[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t dirty_pins[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t pinned_flushes[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t pinned_clears[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t size_increases[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t size_decreases[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t entry_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t cache_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1];
-
- int64_t total_ht_insertions;
- int64_t total_ht_deletions;
- int64_t successful_ht_searches;
- int64_t total_successful_ht_search_depth;
- int64_t failed_ht_searches;
- int64_t total_failed_ht_search_depth;
-
- int32_t max_index_len;
- size_t max_index_size;
- size_t max_clean_index_size;
- size_t max_dirty_index_size;
-
- int32_t max_slist_len;
- size_t max_slist_size;
-
- int32_t max_pl_len;
- size_t max_pl_size;
-
- int32_t max_pel_len;
- size_t max_pel_size;
-
- int64_t calls_to_msic;
- int64_t total_entries_skipped_in_msic;
- int64_t total_entries_scanned_in_msic;
- int32_t max_entries_skipped_in_msic;
- int32_t max_entries_scanned_in_msic;
- int64_t entries_scanned_to_make_space;
-
-#if H5C_COLLECT_CACHE_ENTRY_STATS
-
- int32_t max_accesses[H5C__MAX_NUM_TYPE_IDS + 1];
- int32_t min_accesses[H5C__MAX_NUM_TYPE_IDS + 1];
- int32_t max_clears[H5C__MAX_NUM_TYPE_IDS + 1];
- int32_t max_flushes[H5C__MAX_NUM_TYPE_IDS + 1];
- size_t max_size[H5C__MAX_NUM_TYPE_IDS + 1];
- int32_t max_pins[H5C__MAX_NUM_TYPE_IDS + 1];
-
-#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
-
-#endif /* H5C_COLLECT_CACHE_STATS */
-
- char prefix[H5C__PREFIX_LEN];
-
-#ifndef NDEBUG
-
- int64_t get_entry_ptr_from_addr_counter;
-
-#endif /* NDEBUG */
-};
-
-
-/****************************************************************************/
-/***************************** Macro Definitions ****************************/
-/****************************************************************************/
-
/****************************************************************************
*
* We maintain doubly linked lists of instances of H5C_cache_entry_t for a
@@ -1055,11 +91,11 @@ struct H5C_t
*
* from the H5C__DLL_PRE_REMOVE_SC macro. With the addition of the
* epoch markers used in the age out based cache size reduction algorithm,
- * this invarient need not hold, as the epoch markers are of size 0.
+ * this invariant need not hold, as the epoch markers are of size 0.
*
* One could argue that I should have given the epoch markers a positive
* size, but this would break the index_size = LRU_list_size + pl_size
- * + pel_size invarient.
+ * + pel_size invariant.
*
* Alternatively, I could pass the current decr_mode in to the macro,
* and just skip the check whenever epoch markers may be in use.
@@ -1537,6 +573,15 @@ if ( ( (entry_ptr) == NULL ) || \
#define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) \
((cache_ptr)->unpins)[(entry_ptr)->type->id]++;
+#define H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr) \
+ ((cache_ptr)->slist_scan_restarts)++;
+
+#define H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr) \
+ ((cache_ptr)->LRU_scan_restarts)++;
+
+#define H5C__UPDATE_STATS_FOR_HASH_BUCKET_SCAN_RESTART(cache_ptr) \
+ ((cache_ptr)->hash_bucket_scan_restarts)++;
+
#if H5C_COLLECT_CACHE_ENTRY_STATS
#define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) \
@@ -1563,9 +608,12 @@ if ( ( (entry_ptr) == NULL ) || \
((entry_ptr)->flushes)++; \
}
-#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr) \
-{ \
- (((cache_ptr)->evictions)[(entry_ptr)->type->id])++; \
+#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership) \
+{ \
+ if ( take_ownership ) \
+ (((cache_ptr)->take_ownerships)[(entry_ptr)->type->id])++; \
+ else \
+ (((cache_ptr)->evictions)[(entry_ptr)->type->id])++; \
if ( (entry_ptr)->accesses > \
((cache_ptr)->max_accesses)[(entry_ptr)->type->id] ) \
((cache_ptr)->max_accesses)[(entry_ptr)->type->id] = \
@@ -1615,7 +663,6 @@ if ( ( (entry_ptr) == NULL ) || \
((cache_ptr)->max_size)[(entry_ptr)->type->id] ) \
((cache_ptr)->max_size)[(entry_ptr)->type->id] \
= (entry_ptr)->size; \
- } \
}
#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \
@@ -1674,8 +721,13 @@ if ( ( (entry_ptr) == NULL ) || \
(((cache_ptr)->pinned_flushes)[(entry_ptr)->type->id])++; \
}
-#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr) \
- (((cache_ptr)->evictions)[(entry_ptr)->type->id])++;
+#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership) \
+{ \
+ if ( take_ownership ) \
+ (((cache_ptr)->take_ownerships)[(entry_ptr)->type->id])++; \
+ else \
+ (((cache_ptr)->evictions)[(entry_ptr)->type->id])++; \
+}
#define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \
{ \
@@ -1745,10 +797,13 @@ if ( ( (entry_ptr) == NULL ) || \
#define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr)
-#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr)
+#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership)
#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit)
#define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr)
+#define H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
+#define H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
+#define H5C__UPDATE_STATS_FOR_HASH_BUCKET_SCAN_RESTART(cache_ptr)
#endif /* H5C_COLLECT_CACHE_STATS */
@@ -1781,21 +836,35 @@ if ( ( (entry_ptr) == NULL ) || \
#if H5C_DO_SANITY_CHECKS
-#define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
-if ( ( (cache_ptr) == NULL ) || \
- ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
- ( (entry_ptr) == NULL ) || \
- ( ! H5F_addr_defined((entry_ptr)->addr) ) || \
- ( (entry_ptr)->ht_next != NULL ) || \
- ( (entry_ptr)->ht_prev != NULL ) || \
- ( (entry_ptr)->size <= 0 ) || \
- ( (k = H5C__HASH_FCN((entry_ptr)->addr)) < 0 ) || \
- ( k >= H5C__HASH_TABLE_LEN ) || \
- ( (cache_ptr)->index_size != \
- ((cache_ptr)->clean_index_size + \
- (cache_ptr)->dirty_index_size) ) ) { \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
- "Pre HT insert SC failed") \
+#define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
+if ( ( (cache_ptr) == NULL ) || \
+ ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
+ ( (entry_ptr) == NULL ) || \
+ ( ! H5F_addr_defined((entry_ptr)->addr) ) || \
+ ( (entry_ptr)->ht_next != NULL ) || \
+ ( (entry_ptr)->ht_prev != NULL ) || \
+ ( (entry_ptr)->size <= 0 ) || \
+ ( H5C__HASH_FCN((entry_ptr)->addr) < 0 ) || \
+ ( H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN ) || \
+ ( (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + \
+ (cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
+ "Pre HT insert SC failed") \
+}
+
+#define H5C__POST_HT_INSERT_SC(cache_ptr, fail_val) \
+if ( ( (cache_ptr) == NULL ) || \
+ ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
+ ( (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + \
+ (cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
+ "Post HT insert SC failed") \
}
#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \
@@ -1818,10 +887,28 @@ if ( ( (cache_ptr) == NULL ) || \
( (entry_ptr)->ht_prev != NULL ) ) || \
( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + \
- (cache_ptr)->dirty_index_size) ) ) { \
+ (cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Pre HT remove SC failed") \
}
+#define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr) \
+if ( ( (cache_ptr) == NULL ) || \
+ ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
+ ( (entry_ptr) == NULL ) || \
+ ( ! H5F_addr_defined((entry_ptr)->addr) ) || \
+ ( (entry_ptr)->size <= 0 ) || \
+ ( (entry_ptr)->ht_next != NULL ) || \
+ ( (entry_ptr)->ht_prev != NULL ) || \
+ ( (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + \
+ (cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Post HT remove SC failed") \
+}
+
/* (Keep in sync w/H5C_TEST__PRE_HT_SEARCH_SC macro in test/cache_common.h -QAK) */
#define H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
if ( ( (cache_ptr) == NULL ) || \
@@ -1880,7 +967,8 @@ if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + \
(cache_ptr)->dirty_index_size) ) || \
- ( (entry_ptr == NULL) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
( ( !( was_clean ) || \
( (cache_ptr)->clean_index_size < (old_size) ) ) && \
( ( (was_clean) ) || \
@@ -1899,6 +987,8 @@ if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + \
(cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
( ( !((entry_ptr)->is_dirty ) || \
( (cache_ptr)->dirty_index_size < (new_size) ) ) && \
( ( ((entry_ptr)->is_dirty) ) || \
@@ -1919,7 +1009,9 @@ if ( \
( (cache_ptr)->index_size < (entry_ptr)->size ) || \
( (cache_ptr)->dirty_index_size < (entry_ptr)->size ) || \
( (cache_ptr)->index_size != \
- ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) ) { \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"Pre HT update for entry clean SC failed") \
}
@@ -1934,21 +1026,27 @@ if ( \
( (cache_ptr)->index_size < (entry_ptr)->size ) || \
( (cache_ptr)->clean_index_size < (entry_ptr)->size ) || \
( (cache_ptr)->index_size != \
- ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) ) { \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"Pre HT update for entry dirty SC failed") \
}
#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \
-if ( (cache_ptr)->index_size != \
- ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) { \
+if ( ( (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"Post HT update for entry clean SC failed") \
}
#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \
-if ( (cache_ptr)->index_size != \
- ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) { \
+if ( ( (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"Post HT update for entry dirty SC failed") \
}
@@ -1956,7 +1054,9 @@ if ( (cache_ptr)->index_size != \
#else /* H5C_DO_SANITY_CHECKS */
#define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val)
+#define H5C__POST_HT_INSERT_SC(cache_ptr, fail_val)
#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr)
+#define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr)
#define H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val)
#define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val)
#define H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val)
@@ -1978,27 +1078,24 @@ if ( (cache_ptr)->index_size != \
H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
k = H5C__HASH_FCN((entry_ptr)->addr); \
if ( ((cache_ptr)->index)[k] == NULL ) \
- { \
((cache_ptr)->index)[k] = (entry_ptr); \
- } \
- else \
- { \
+ else { \
(entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
(entry_ptr)->ht_next->ht_prev = (entry_ptr); \
((cache_ptr)->index)[k] = (entry_ptr); \
} \
(cache_ptr)->index_len++; \
(cache_ptr)->index_size += (entry_ptr)->size; \
- if ( (entry_ptr)->is_dirty ) { \
+ if ( (entry_ptr)->is_dirty ) \
(cache_ptr)->dirty_index_size += (entry_ptr)->size; \
- } else { \
+ else \
(cache_ptr)->clean_index_size += (entry_ptr)->size; \
- } \
if ((entry_ptr)->flush_me_last) { \
(cache_ptr)->num_last_entries++; \
- HDassert((cache_ptr)->num_last_entries == 1); \
+ HDassert((cache_ptr)->num_last_entries <= 2); \
} \
H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) \
+ H5C__POST_HT_INSERT_SC(cache_ptr, fail_val) \
}
#define H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr) \
@@ -2007,31 +1104,25 @@ if ( (cache_ptr)->index_size != \
H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \
k = H5C__HASH_FCN((entry_ptr)->addr); \
if ( (entry_ptr)->ht_next ) \
- { \
(entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
- } \
if ( (entry_ptr)->ht_prev ) \
- { \
(entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
- } \
if ( ((cache_ptr)->index)[k] == (entry_ptr) ) \
- { \
((cache_ptr)->index)[k] = (entry_ptr)->ht_next; \
- } \
(entry_ptr)->ht_next = NULL; \
(entry_ptr)->ht_prev = NULL; \
(cache_ptr)->index_len--; \
(cache_ptr)->index_size -= (entry_ptr)->size; \
- if ( (entry_ptr)->is_dirty ) { \
+ if ( (entry_ptr)->is_dirty ) \
(cache_ptr)->dirty_index_size -= (entry_ptr)->size; \
- } else { \
+ else \
(cache_ptr)->clean_index_size -= (entry_ptr)->size; \
- } \
if ((entry_ptr)->flush_me_last) { \
(cache_ptr)->num_last_entries--; \
- HDassert((cache_ptr)->num_last_entries == 0); \
+ HDassert((cache_ptr)->num_last_entries <= 1); \
} \
H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) \
+ H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr) \
}
#define H5C__SEARCH_INDEX(cache_ptr, Addr, entry_ptr, fail_val) \
@@ -2041,20 +1132,15 @@ if ( (cache_ptr)->index_size != \
H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
k = H5C__HASH_FCN(Addr); \
entry_ptr = ((cache_ptr)->index)[k]; \
- while ( ( entry_ptr ) && ( H5F_addr_ne(Addr, (entry_ptr)->addr) ) ) \
- { \
+ while ( ( entry_ptr ) && ( H5F_addr_ne(Addr, (entry_ptr)->addr) ) ) { \
(entry_ptr) = (entry_ptr)->ht_next; \
(depth)++; \
} \
- if ( entry_ptr ) \
- { \
+ if ( entry_ptr ) { \
H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val) \
- if ( entry_ptr != ((cache_ptr)->index)[k] ) \
- { \
+ if ( entry_ptr != ((cache_ptr)->index)[k] ) { \
if ( (entry_ptr)->ht_next ) \
- { \
(entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
- } \
HDassert( (entry_ptr)->ht_prev != NULL ); \
(entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \
@@ -2074,20 +1160,15 @@ if ( (cache_ptr)->index_size != \
H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
k = H5C__HASH_FCN(Addr); \
entry_ptr = ((cache_ptr)->index)[k]; \
- while ( ( entry_ptr ) && ( H5F_addr_ne(Addr, (entry_ptr)->addr) ) ) \
- { \
+ while ( ( entry_ptr ) && ( H5F_addr_ne(Addr, (entry_ptr)->addr) ) ) { \
(entry_ptr) = (entry_ptr)->ht_next; \
(depth)++; \
} \
- if ( entry_ptr ) \
- { \
+ if ( entry_ptr ) { \
H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val) \
- if ( entry_ptr != ((cache_ptr)->index)[k] ) \
- { \
+ if ( entry_ptr != ((cache_ptr)->index)[k] ) { \
if ( (entry_ptr)->ht_next ) \
- { \
(entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
- } \
HDassert( (entry_ptr)->ht_prev != NULL ); \
(entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \
@@ -2122,16 +1203,14 @@ if ( (cache_ptr)->index_size != \
entry_ptr, was_clean) \
(cache_ptr)->index_size -= (old_size); \
(cache_ptr)->index_size += (new_size); \
- if ( was_clean ) { \
+ if ( was_clean ) \
(cache_ptr)->clean_index_size -= (old_size); \
- } else { \
+ else \
(cache_ptr)->dirty_index_size -= (old_size); \
- } \
- if ( (entry_ptr)->is_dirty ) { \
+ if ( (entry_ptr)->is_dirty ) \
(cache_ptr)->dirty_index_size += (new_size); \
- } else { \
+ else \
(cache_ptr)->clean_index_size += (new_size); \
- } \
H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
entry_ptr) \
}
@@ -2194,9 +1273,20 @@ if ( (cache_ptr)->index_size != \
* able to dirty, resize and/or move entries during the
* flush.
*
+ * JRM -- 12/13/14
+ * Added code to set cache_ptr->slist_changed to TRUE
+ * when an entry is inserted in the slist.
+ *
*-------------------------------------------------------------------------
*/
+#if H5C_DO_SLIST_SANITY_CHECKS
+#define ENTRY_IN_SLIST(cache_ptr, entry_ptr) \
+ H5C_entry_in_skip_list((cache_ptr), (entry_ptr))
+#else /* H5C_DO_SLIST_SANITY_CHECKS */
+#define ENTRY_IN_SLIST(cache_ptr, entry_ptr) FALSE
+#endif /* H5C_DO_SLIST_SANITY_CHECKS */
+
#if H5C_DO_SANITY_CHECKS
#define H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val) \
@@ -2207,13 +1297,14 @@ if ( (cache_ptr)->index_size != \
HDassert( (entry_ptr)->size > 0 ); \
HDassert( H5F_addr_defined((entry_ptr)->addr) ); \
HDassert( !((entry_ptr)->in_slist) ); \
+ HDassert( !ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) ); \
\
- if ( H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) \
- < 0 ) \
+ if(H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) < 0) \
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
"Can't insert entry in skip list") \
\
(entry_ptr)->in_slist = TRUE; \
+ (cache_ptr)->slist_changed = TRUE; \
(cache_ptr)->slist_len++; \
(cache_ptr)->slist_size += (entry_ptr)->size; \
(cache_ptr)->slist_len_increase++; \
@@ -2234,13 +1325,14 @@ if ( (cache_ptr)->index_size != \
HDassert( (entry_ptr)->size > 0 ); \
HDassert( H5F_addr_defined((entry_ptr)->addr) ); \
HDassert( !((entry_ptr)->in_slist) ); \
+ HDassert( !ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) ); \
\
- if ( H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) \
- < 0 ) \
+ if(H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) < 0) \
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
"Can't insert entry in skip list") \
\
(entry_ptr)->in_slist = TRUE; \
+ (cache_ptr)->slist_changed = TRUE; \
(cache_ptr)->slist_len++; \
(cache_ptr)->slist_size += (entry_ptr)->size; \
\
@@ -2281,6 +1373,10 @@ if ( (cache_ptr)->index_size != \
* Updated sanity checks for the new is_read_only and
* ro_ref_count fields in H5C_cache_entry_t.
*
+ * JRM -- 12/13/14
+ * Added code to set cache_ptr->slist_changed to TRUE
+ * when an entry is removed from the slist.
+ *
*-------------------------------------------------------------------------
*/
@@ -2297,12 +1393,12 @@ if ( (cache_ptr)->index_size != \
HDassert( (cache_ptr)->slist_ptr ); \
\
if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \
- != (entry_ptr) ) \
- \
+ != (entry_ptr) ) \
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \
"Can't delete entry from skip list.") \
\
HDassert( (cache_ptr)->slist_len > 0 ); \
+ (cache_ptr)->slist_changed = TRUE; \
(cache_ptr)->slist_len--; \
HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \
(cache_ptr)->slist_size -= (entry_ptr)->size; \
@@ -2333,6 +1429,11 @@ if ( (cache_ptr)->index_size != \
* able to dirty, resize and/or move entries during the
* flush.
*
+ * JRM -- 12/13/14
+ * Note that we do not set cache_ptr->slist_changed to TRUE
+ * in this case, as the structure of the slist is not
+ * modified.
+ *
*-------------------------------------------------------------------------
*/
@@ -3601,5 +2702,1074 @@ if ( (cache_ptr)->index_size != \
#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+/****************************/
+/* Package Private Typedefs */
+/****************************/
+
+/****************************************************************************
+ *
+ * structure H5C_t
+ *
+ * Catchall structure for all variables specific to an instance of the cache.
+ *
+ * While the individual fields of the structure are discussed below, the
+ * following overview may be helpful.
+ *
+ * Entries in the cache are stored in an instance of H5TB_TREE, indexed on
+ * the entry's disk address. While the H5TB_TREE is less efficient than a
+ * hash table, it keeps the entries in address sorted order. As flushes
+ * in parallel mode are more efficient if they are issued in increasing
+ * address order, this is a significant benefit. Also the H5TB_TREE code
+ * was readily available, which reduced development time.
+ *
+ * While the cache was designed with multiple replacement policies in mind,
+ * at present only a modified form of LRU is supported.
+ *
+ * JRM - 4/26/04
+ *
+ * Profiling has indicated that searches in the instance of H5TB_TREE are
+ * too expensive. To deal with this issue, I have augmented the cache
+ * with a hash table in which all entries will be stored. Given the
+ * advantages of flushing entries in increasing address order, the TBBT
+ * is retained, but only dirty entries are stored in it. At least for
+ * now, we will leave entries in the TBBT after they are flushed.
+ *
+ * Note that index_size and index_len now refer to the total size of
+ * and number of entries in the hash table.
+ *
+ * JRM - 7/19/04
+ *
+ * The TBBT has since been replaced with a skip list. This change
+ * greatly predates this note.
+ *
+ * JRM - 9/26/05
+ *
+ * magic: Unsigned 32 bit integer always set to H5C__H5C_T_MAGIC.
+ * This field is used to validate pointers to instances of
+ * H5C_t.
+ *
+ * flush_in_progress: Boolean flag indicating whether a flush is in
+ * progress.
+ *
+ * trace_file_ptr: File pointer pointing to the trace file, which is used
+ * to record cache operations for use in simulations and design
+ * studies. This field will usually be NULL, indicating that
+ * no trace file should be recorded.
+ *
+ * Since much of the code supporting the parallel metadata
+ * cache is in H5AC, we don't write the trace file from
+ * H5C. Instead, H5AC reads the trace_file_ptr as needed.
+ *
+ * When we get to using H5C in other places, we may add
+ * code to write trace file data at the H5C level as well.
+ *
+ * aux_ptr: Pointer to void used to allow wrapper code to associate
+ * its data with an instance of H5C_t. The H5C cache code
+ * sets this field to NULL, and otherwise leaves it alone.
+ *
+ * max_type_id: Integer field containing the maximum type id number assigned
+ * to a type of entry in the cache. All type ids from 0 to
+ * max_type_id inclusive must be defined. The names of the
+ * types are stored in the type_name_table discussed below, and
+ * indexed by the ids.
+ *
+ * type_name_table_ptr: Pointer to an array of pointers to char of length
+ * max_type_id + 1. The strings pointed to by the entries
+ * in the array are the names of the entry types associated
+ * with the indexing type IDs.
+ *
+ * max_cache_size: Nominal maximum number of bytes that may be stored in the
+ * cache. This value should be viewed as a soft limit, as the
+ * cache can exceed this value under the following circumstances:
+ *
+ * a) All entries in the cache are protected, and the cache is
+ * asked to insert a new entry. In this case the new entry
+ * will be created. If this causes the cache to exceed
+ * max_cache_size, it will do so. The cache will attempt
+ * to reduce its size as entries are unprotected.
+ *
+ * b) When running in parallel mode, the cache may not be
+ * permitted to flush a dirty entry in response to a read.
+ * If there are no clean entries available to evict, the
+ * cache will exceed its maximum size. Again the cache
+ * will attempt to reduce its size to the max_cache_size
+ * limit on the next cache write.
+ *
+ * c) When an entry increases in size, the cache may exceed
+ * the max_cache_size limit until the next time the cache
+ * attempts to load or insert an entry.
+ *
+ * d) When the evictions_enabled field is false (see below),
+ * the cache size will increase without limit until the
+ * field is set to true.
+ *
+ * min_clean_size: Nominal minimum number of clean bytes in the cache.
+ * The cache attempts to maintain this number of bytes of
+ * clean data so as to avoid case b) above. Again, this is
+ * a soft limit.
+ *
+ *
+ * In addition to the call back functions required for each entry, the
+ * cache requires the following call back functions for this instance of
+ * the cache as a whole:
+ *
+ * check_write_permitted: In certain applications, the cache may not
+ *              be allowed to write to disk at certain times. If specified,
+ * the check_write_permitted function is used to determine if
+ * a write is permissible at any given point in time.
+ *
+ * If no such function is specified (i.e. this field is NULL),
+ * the cache uses the following write_permitted field to
+ * determine whether writes are permitted.
+ *
+ * write_permitted: If check_write_permitted is NULL, this boolean flag
+ * indicates whether writes are permitted.
+ *
+ * log_flush: If provided, this function is called whenever a dirty
+ * entry is flushed to disk.
+ *
+ *
+ * In cases where memory is plentiful, and performance is an issue, it may
+ * be useful to disable all cache evictions, and thereby postpone metadata
+ * writes. The following field is used to implement this.
+ *
+ * evictions_enabled: Boolean flag that is initialized to TRUE. When
+ * this flag is set to FALSE, the metadata cache will not
+ * attempt to evict entries to make space for newly protected
+ *              entries, and instead the cache will grow without limit.
+ *
+ * Needless to say, this feature must be used with care.
+ *
+ *
+ * The cache requires an index to facilitate searching for entries. The
+ * following fields support that index.
+ *
+ * index_len: Number of entries currently in the hash table used to index
+ * the cache.
+ *
+ * index_size: Number of bytes of cache entries currently stored in the
+ * hash table used to index the cache.
+ *
+ * This value should not be mistaken for footprint of the
+ * cache in memory. The average cache entry is small, and
+ * the cache has a considerable overhead. Multiplying the
+ * index_size by three should yield a conservative estimate
+ * of the cache's memory footprint.
+ *
+ * clean_index_size: Number of bytes of clean entries currently stored in
+ * the hash table. Note that the index_size field (above)
+ * is also the sum of the sizes of all entries in the cache.
+ * Thus we should have the invariant that clean_index_size +
+ * dirty_index_size == index_size.
+ *
+ * WARNING:
+ *
+ * The value of the clean_index_size must not be mistaken
+ * for the current clean size of the cache. Rather, the
+ * clean size of the cache is the current value of
+ * clean_index_size plus the amount of empty space (if any)
+ * in the cache.
+ *
+ * dirty_index_size: Number of bytes of dirty entries currently stored in
+ * the hash table. Note that the index_size field (above)
+ * is also the sum of the sizes of all entries in the cache.
+ * Thus we should have the invariant that clean_index_size +
+ * dirty_index_size == index_size.
+ *
+ * index: Array of pointer to H5C_cache_entry_t of size
+ * H5C__HASH_TABLE_LEN. At present, this value is a power
+ * of two, not the usual prime number.
+ *
+ * I hope that the variable size of cache elements, the large
+ * hash table size, and the way in which HDF5 allocates space
+ * will combine to avoid problems with periodicity. If so, we
+ * can use a trivial hash function (a bit-and and a 3 bit left
+ * shift) with some small savings.
+ *
+ * If not, it will become evident in the statistics. Changing
+ * to the usual prime number length hash table will require
+ * changing the H5C__HASH_FCN macro and the deletion of the
+ * H5C__HASH_MASK #define. No other changes should be required.
+ *
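One plausible realization of the "trivial hash function (a bit-and and a 3 bit left shift)" mentioned above is sketched here. The macro names, the mask, and the choice to discard the low three address bits are assumptions for illustration; the cache's actual macros may differ in detail.

    #include <stdint.h>

    #define SKETCH_HASH_TABLE_LEN  (64 * 1024)        /* must be a power of 2 */

    /* Select log2(SKETCH_HASH_TABLE_LEN) address bits, skipping the low three
     * bits, and shift the result down to form a bucket index.
     */
    #define SKETCH_HASH_MASK \
        (((uint64_t)(SKETCH_HASH_TABLE_LEN - 1)) << 3)

    #define SKETCH_HASH_FCN(addr) \
        ((int)((((uint64_t)(addr)) & SKETCH_HASH_MASK) >> 3))

With a power-of-two table length, the resulting index always falls in [0, SKETCH_HASH_TABLE_LEN - 1], which is the range the pre-insert sanity check macros above verify.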
+ * With the addition of the take ownership flag, it is possible that
+ * an entry may be removed from the cache as the result of the flush of
+ * a second entry. In general, this causes little trouble, but it is
+ * possible that the entry removed may be the next entry in the scan of
+ * a list. In this case, we must be able to detect the fact that the
+ * entry has been removed, so that the scan doesn't attempt to proceed with
+ * an entry that is no longer in the cache.
+ *
+ * The following fields are maintained to facilitate this.
+ *
+ * entries_removed_counter: Counter that is incremented each time an
+ * entry is removed from the cache by any means (eviction,
+ * expungement, or take ownership at this point in time).
+ * Functions that perform scans on lists may set this field
+ * to zero prior to calling H5C_flush_single_entry().
+ * Unexpected changes to the counter indicate that an entry
+ * was removed from the cache as a side effect of the flush.
+ *
+ * last_entry_removed_ptr: Pointer to the instance of H5C_cache_entry_t
+ * which contained the last entry to be removed from the cache,
+ * or NULL if there either is no such entry, or if a function
+ * performing a scan of a list has set this field to NULL prior
+ * to calling H5C_flush_single_entry().
+ *
+ * WARNING!!! This field must NEVER be dereferenced. It is
+ * maintained to allow functions that perform scans of lists
+ * to compare this pointer with their pointers to next, thus
+ * allowing them to avoid unnecessary restarts of scans if the
+ * pointers don't match, and if entries_removed_counter is
+ * one.
+ *
+ *
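A hedged sketch of how a list scan might use these two fields is given below. The structure definitions and the flush_one callback are placeholders standing in for the real cache and flush logic; only the counter-and-pointer test mirrors the description above.

    #include <stdint.h>
    #include <stddef.h>

    /* Minimal placeholder types; the real cache and entry structures are far
     * richer than this.
     */
    typedef struct sketch_entry_t {
        struct sketch_entry_t *next;
    } sketch_entry_t;

    typedef struct sketch_cache_t {
        sketch_entry_t *list_head;
        int64_t         entries_removed_counter;
        void           *last_entry_removed_ptr;  /* compared, never dereferenced */
        void          (*flush_one)(struct sketch_cache_t *, sketch_entry_t *);
    } sketch_cache_t;

    /* Walk the list, flushing each entry; restart the walk if the flush
     * removed entries in a way that may have invalidated the saved 'next'
     * pointer.
     */
    static void
    scan_and_flush(sketch_cache_t *cache)
    {
        sketch_entry_t *node = cache->list_head;

        while (node != NULL) {
            sketch_entry_t *next = node->next;

            /* reset the detection fields before the flush */
            cache->entries_removed_counter = 0;
            cache->last_entry_removed_ptr  = NULL;

            cache->flush_one(cache, node);

            /* More than one entry removed, or the single removed entry was
             * the one we were about to visit: the saved pointer cannot be
             * trusted, so restart from the head of the list.
             */
            if (cache->entries_removed_counter > 1 ||
                (cache->entries_removed_counter == 1 &&
                 cache->last_entry_removed_ptr == (void *)next))
                node = cache->list_head;
            else
                node = next;
        }
    }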
+ * With the addition of cache entry tagging, it is possible that
+ * an entry may be inserted into the cache without a tag during testing
+ * and the tag's validity shouldn't be checked.
+ *
+ * The following field is maintained to facilitate this.
+ *
+ * ignore_tags: Boolean flag to disable tag validation during entry insertion.
+ *
+ * When we flush the cache, we need to write entries out in increasing
+ * address order. An instance of a skip list is used to store dirty entries in
+ * sorted order. Whether it is cheaper to sort the dirty entries as needed,
+ * or to maintain the list is an open question. At a guess, it depends
+ * on how frequently the cache is flushed. We will see how it goes.
+ *
+ * For now at least, I will not remove dirty entries from the list as they
+ * are flushed. (this has been changed -- dirty entries are now removed from
+ * the skip list as they are flushed. JRM - 10/25/05)
+ *
+ * slist_changed: Boolean flag used to indicate whether the contents of
+ * the slist has changed since the last time this flag was
+ * reset. This is used in the cache flush code to detect
+ * conditions in which pre-serialize or serialize callbacks
+ * have modified the slist -- which obliges us to restart
+ * the scan of the slist from the beginning.
+ *
+ * slist_change_in_pre_serialize: Boolean flag used to indicate that
+ * a pre_serialize call has modified the slist since the
+ * last time this flag was reset.
+ *
+ * slist_change_in_serialize: Boolean flag used to indicate that
+ * a serialize call has modified the slist since the
+ * last time this flag was reset.
+ *
+ * slist_len: Number of entries currently in the skip list
+ * used to maintain a sorted list of dirty entries in the
+ * cache.
+ *
+ * slist_size: Number of bytes of cache entries currently stored in the
+ * skip list used to maintain a sorted list of
+ * dirty entries in the cache.
+ *
+ * slist_ptr:   Pointer to the instance of H5SL_t used to maintain a sorted
+ * list of dirty entries in the cache. This sorted list has
+ * two uses:
+ *
+ * a) It allows us to flush dirty entries in increasing address
+ * order, which results in significant savings.
+ *
+ * b) It facilitates checking for adjacent dirty entries when
+ * attempting to evict entries from the cache. While we
+ * don't use this at present, I hope that this will allow
+ * some optimizations when I get to it.
+ *
+ * num_last_entries: The number of entries in the cache that can only be
+ * flushed after all other entries in the cache have
+ * been flushed. At this time, this will only ever be
+ * one entry (the superblock), and the code has been
+ * protected with HDasserts to enforce this. This restraint
+ * can certainly be relaxed in the future if the need for
+ * multiple entries being flushed last arises, though
+ * explicit tests for that case should be added when said
+ * HDasserts are removed.
+ *
+ * Update: There are now two possible last entries
+ * (superblock and file driver info message). This
+ * number will probably increase as we add superblock
+ * messages. JRM -- 11/18/14
+ *
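A hedged sketch of the flush_me_last ordering rule just described follows. The types and callback are placeholders; the real flush path operates on the skip list and carries much more state, but the two-pass idea is the same: ordinary entries first, then the (at most two) flush-me-last entries.

    #include <stddef.h>

    /* Placeholder dirty-entry node carrying only the flag of interest. */
    typedef struct last_sketch_entry_t {
        struct last_sketch_entry_t *next;
        int                         flush_me_last;    /* 0 or 1 */
    } last_sketch_entry_t;

    static void
    flush_all_sketch(last_sketch_entry_t *dirty_list,
                     void (*flush)(last_sketch_entry_t *))
    {
        last_sketch_entry_t *ep;

        /* pass 1: everything that is not marked flush_me_last */
        for (ep = dirty_list; ep != NULL; ep = ep->next)
            if (!ep->flush_me_last)
                flush(ep);

        /* pass 2: the deferred entries (e.g. superblock, driver info) */
        for (ep = dirty_list; ep != NULL; ep = ep->next)
            if (ep->flush_me_last)
                flush(ep);
    }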
+ * With the addition of the fractal heap, the cache must now deal with
+ * the case in which entries may be dirtied, moved, or have their sizes
+ * changed during a flush. To allow sanity checks in this situation, the
+ * following two fields have been added. They are only compiled in when
+ * H5C_DO_SANITY_CHECKS is TRUE.
+ *
+ * slist_len_increase: Number of entries that have been added to the
+ * slist since the last time this field was set to zero.
+ * Note that this value can be negative.
+ *
+ * slist_size_increase: Total size of all entries that have been added
+ * to the slist since the last time this field was set to
+ * zero. Note that this value can be negative.
+ *
+ *
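The slist_changed protocol above can be illustrated with the placeholder sketch below: the flag is cleared before each entry is processed, and if the serialize step changes the list's structure, the walk restarts from the lowest address. The real flush code is considerably more involved (it must, among other things, account for the removal of the entry it just flushed); this shows only the flag-and-restart idea, with invented types.

    #include <stdbool.h>
    #include <stddef.h>

    /* Placeholder dirty-entry node, kept in increasing address order. */
    typedef struct sl_sketch_entry_t {
        struct sl_sketch_entry_t *next;     /* entry with next higher address */
        unsigned long long        addr;
    } sl_sketch_entry_t;

    typedef struct sl_sketch_t {
        sl_sketch_entry_t *head;            /* lowest-addressed dirty entry   */
        bool               slist_changed;   /* set by any insert or remove    */
    } sl_sketch_t;

    static void
    flush_in_address_order(sl_sketch_t *sl,
                           void (*serialize)(sl_sketch_t *, sl_sketch_entry_t *))
    {
        sl_sketch_entry_t *cursor = sl->head;

        while (cursor != NULL) {
            sl_sketch_entry_t *next = cursor->next;

            sl->slist_changed = false;      /* reset before the callback      */
            serialize(sl, cursor);          /* may insert/remove list entries */

            if (sl->slist_changed)
                cursor = sl->head;          /* structure changed: restart     */
            else
                cursor = next;
        }
    }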
+ * When a cache entry is protected, it must be removed from the LRU
+ * list(s) as it cannot be either flushed or evicted until it is unprotected.
+ * The following fields are used to implement the protected list (pl).
+ *
+ * pl_len: Number of entries currently residing on the protected list.
+ *
+ * pl_size: Number of bytes of cache entries currently residing on the
+ * protected list.
+ *
+ * pl_head_ptr: Pointer to the head of the doubly linked list of protected
+ * entries. Note that cache entries on this list are linked
+ * by their next and prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * pl_tail_ptr: Pointer to the tail of the doubly linked list of protected
+ * entries. Note that cache entries on this list are linked
+ * by their next and prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ *
+ * For very frequently used entries, the protect/unprotect overhead can
+ * become burdensome. To avoid this overhead, I have modified the cache
+ * to allow entries to be "pinned". A pinned entry is similar to a
+ * protected entry, in the sense that it cannot be evicted, and that
+ * the entry can be modified at any time.
+ *
+ * Pinning an entry has the following implications:
+ *
+ * 1) A pinned entry cannot be evicted. Thus unprotected
+ * pinned entries reside in the pinned entry list, instead
+ * of the LRU list(s) (or other lists maintained by the current
+ * replacement policy code).
+ *
+ * 2) A pinned entry can be accessed or modified at any time.
+ * This places an additional burden on the associated pre-serialize
+ *       and serialize callbacks, which must ensure that the entry is in
+ *       a consistent state before creating an image of it.
+ *
+ * 3) A pinned entry can be marked as dirty (and possibly
+ * change size) while it is unprotected.
+ *
+ * 4) The flush-destroy code must allow pinned entries to
+ * be unpinned (and possibly unprotected) during the
+ * flush.
+ *
+ * Since pinned entries cannot be evicted, they must be kept on a pinned
+ * entry list (pel), instead of being entrusted to the replacement policy
+ * code.
+ *
+ * Maintaining the pinned entry list requires the following fields:
+ *
+ * pel_len: Number of entries currently residing on the pinned
+ * entry list.
+ *
+ * pel_size: Number of bytes of cache entries currently residing on
+ * the pinned entry list.
+ *
+ * pel_head_ptr: Pointer to the head of the doubly linked list of pinned
+ * but not protected entries. Note that cache entries on
+ * this list are linked by their next and prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * pel_tail_ptr: Pointer to the tail of the doubly linked list of pinned
+ * but not protected entries. Note that cache entries on
+ * this list are linked by their next and prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ *
+ * The cache must have a replacement policy, and the fields supporting this
+ * policy must be accessible from this structure.
+ *
+ * While there has been interest in several replacement policies for
+ * this cache, the initial development schedule is tight. Thus I have
+ * elected to support only a modified LRU (least recently used) policy
+ * for the first cut.
+ *
+ * To further simplify matters, I have simply included the fields needed
+ * by the modified LRU in this structure. When and if we add support for
+ * other policies, it will probably be easiest to just add the necessary
+ * fields to this structure as well -- we only create one instance of this
+ * structure per file, so the overhead is not excessive.
+ *
+ *
+ * Fields supporting the modified LRU policy:
+ *
+ * See most any OS text for a discussion of the LRU replacement policy.
+ *
+ * When operating in parallel mode, we must ensure that a read does not
+ * cause a write. If it does, the process will hang, as the write will
+ * be collective and the other processes will not know to participate.
+ *
+ * To deal with this issue, I have modified the usual LRU policy by adding
+ * clean and dirty LRU lists to the usual LRU list. In general, these
+ * lists only exist in parallel builds.
+ *
+ * The clean LRU list is simply the regular LRU list with all dirty cache
+ * entries removed.
+ *
+ * Similarly, the dirty LRU list is the regular LRU list with all the clean
+ * cache entries removed.
+ *
+ * When reading in parallel mode, we evict from the clean LRU list only.
+ * This implies that we must try to ensure that the clean LRU list is
+ * reasonably well stocked at all times.
+ *
+ * We attempt to do this by trying to flush enough entries on each write
+ * to keep the cLRU_list_size >= min_clean_size.
+ *
+ * Even if we start with a completely clean cache, a sequence of protects
+ * without unprotects can empty the clean LRU list. In this case, the
+ * cache must grow temporarily. At the next sync point, we will attempt to
+ * evict enough entries to reduce index_size to less than max_cache_size.
+ * While this will usually be possible, all bets are off if enough entries
+ * are protected.
+ *
+ * Discussions of the individual fields used by the modified LRU replacement
+ * policy follow:
+ *
+ * LRU_list_len: Number of cache entries currently on the LRU list.
+ *
+ * Observe that LRU_list_len + pl_len + pel_len must always
+ * equal index_len.
+ *
+ * LRU_list_size: Number of bytes of cache entries currently residing on the
+ * LRU list.
+ *
+ * Observe that LRU_list_size + pl_size + pel_size must always
+ * equal index_size.
+ *
+ * LRU_head_ptr: Pointer to the head of the doubly linked LRU list. Cache
+ * entries on this list are linked by their next and prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * LRU_tail_ptr: Pointer to the tail of the doubly linked LRU list. Cache
+ * entries on this list are linked by their next and prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * cLRU_list_len: Number of cache entries currently on the clean LRU list.
+ *
+ * Observe that cLRU_list_len + dLRU_list_len must always
+ * equal LRU_list_len.
+ *
+ * cLRU_list_size: Number of bytes of cache entries currently residing on
+ * the clean LRU list.
+ *
+ * Observe that cLRU_list_size + dLRU_list_size must always
+ * equal LRU_list_size.
+ *
+ * cLRU_head_ptr: Pointer to the head of the doubly linked clean LRU list.
+ * Cache entries on this list are linked by their aux_next and
+ * aux_prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * cLRU_tail_ptr: Pointer to the tail of the doubly linked clean LRU list.
+ * Cache entries on this list are linked by their aux_next and
+ * aux_prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * dLRU_list_len: Number of cache entries currently on the dirty LRU list.
+ *
+ * Observe that cLRU_list_len + dLRU_list_len must always
+ * equal LRU_list_len.
+ *
+ * dLRU_list_size: Number of bytes of cache entries currently residing on
+ * the dirty LRU list.
+ *
+ * Observe that cLRU_list_size + dLRU_list_size must always
+ * equal LRU_list_size.
+ *
+ * dLRU_head_ptr: Pointer to the head of the doubly linked dirty LRU list.
+ * Cache entries on this list are linked by their aux_next and
+ * aux_prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
+ * dLRU_tail_ptr: Pointer to the tail of the doubly linked dirty LRU list.
+ * Cache entries on this list are linked by their aux_next and
+ * aux_prev fields.
+ *
+ * This field is NULL if the list is empty.
+ *
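+ * As an illustration only (not part of the header), the list length
+ * and size invariants noted above can be stated as assertions, assuming
+ * cache_ptr points to an instance of H5C_t and that the clean and dirty
+ * LRU lists are being maintained:
+ *
+ *     HDassert(cache_ptr->index_len == cache_ptr->LRU_list_len +
+ *              cache_ptr->pl_len + cache_ptr->pel_len);
+ *     HDassert(cache_ptr->index_size == cache_ptr->LRU_list_size +
+ *              cache_ptr->pl_size + cache_ptr->pel_size);
+ *     HDassert(cache_ptr->LRU_list_len == cache_ptr->cLRU_list_len +
+ *              cache_ptr->dLRU_list_len);
+ *     HDassert(cache_ptr->LRU_list_size == cache_ptr->cLRU_list_size +
+ *              cache_ptr->dLRU_list_size);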
+ *
+ * Automatic cache size adjustment:
+ *
+ * While the default cache size is adequate for most cases, we can run into
+ * cases where the default is too small. Ideally, we will let the user
+ * adjust the cache size as required. However, this is not possible in all
+ * cases. Thus I have added automatic cache size adjustment code.
+ *
+ * The configuration for the automatic cache size adjustment is stored in
+ * the structure described below:
+ *
+ * size_increase_possible: Depending on the configuration data given
+ * in the resize_ctl field, it may or may not be possible
+ * to increase the size of the cache. Rather than test for
+ * all the ways this can happen, we simply set this flag when
+ * we receive a new configuration.
+ *
+ * flash_size_increase_possible: Depending on the configuration data given
+ * in the resize_ctl field, it may or may not be possible
+ * for a flash size increase to occur. We set this flag
+ * whenever we receive a new configuration so as to avoid
+ * repeated calculations.
+ *
+ * flash_size_increase_threshold: If a flash cache size increase is possible,
+ * this field is used to store the minimum size of a new entry
+ * or size increase needed to trigger a flash cache size
+ * increase. Note that this field must be updated whenever
+ * the size of the cache is changed.
+ *
+ * size_decrease_possible: Depending on the configuration data given
+ * in the resize_ctl field, it may or may not be possible
+ * to decrease the size of the cache. Rather than test for
+ * all the ways this can happen, we simply set this flag when
+ * we receive a new configuration.
+ *
+ * cache_full: Boolean flag used to keep track of whether the cache is
+ * full, so we can refrain from increasing the size of a
+ * cache which hasn't used up the space allotted to it.
+ *
+ * The field is initialized to FALSE, and then set to TRUE
+ * whenever we attempt to make space in the cache.
+ *
+ * resize_enabled: This is another convenience flag which is set whenever
+ * a new set of values for resize_ctl are provided. Very
+ * simply,
+ *
+ * resize_enabled = size_increase_possible ||
+ * size_decrease_possible;
+ *
+ * size_decreased: Boolean flag set to TRUE whenever the maximum cache
+ * size is decreased. The flag triggers a call to
+ * H5C_make_space_in_cache() on the next call to H5C_protect().
+ *
+ * resize_ctl: Instance of H5C_auto_size_ctl_t containing configuration
+ * data for automatic cache resizing.
+ *
+ * epoch_markers_active: Integer field containing the number of epoch
+ * markers currently in use in the LRU list. This value
+ * must be in the range [0, H5C__MAX_EPOCH_MARKERS - 1].
+ *
+ * epoch_marker_active: Array of boolean of length H5C__MAX_EPOCH_MARKERS.
+ * This array is used to track which epoch markers are currently
+ * in use.
+ *
+ * epoch_marker_ringbuf: Array of int of length H5C__MAX_EPOCH_MARKERS + 1.
+ *
+ * To manage the epoch marker cache entries, it is necessary
+ * to track their order in the LRU list. This is done with
+ * epoch_marker_ringbuf. When markers are inserted at the
+ * head of the LRU list, the index of the marker in the
+ * epoch_markers array is inserted at the tail of the ring
+ * buffer. When it becomes the epoch_marker_active'th marker
+ * in the LRU list, it will have worked its way to the head
+ * of the ring buffer as well. This allows us to remove it
+ * without scanning the LRU list if such is required.
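+ * (A minimal sketch of this ring buffer bookkeeping appears at the
+ * end of this section.)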
+ *
+ * epoch_marker_ringbuf_first: Integer field containing the index of the
+ * first entry in the ring buffer.
+ *
+ * epoch_marker_ringbuf_last: Integer field containing the index of the
+ * last entry in the ring buffer.
+ *
+ * epoch_marker_ringbuf_size: Integer field containing the number of entries
+ * in the ring buffer.
+ *
+ * epoch_markers: Array of instances of H5C_cache_entry_t of length
+ * H5C__MAX_EPOCH_MARKERS. The entries are used as markers
+ * in the LRU list to identify cache entries that haven't
+ * been accessed for some (small) specified number of
+ * epochs. These entries (if any) can then be evicted and
+ * the cache size reduced -- ideally without evicting any
+ * of the current working set. Needless to say, the epoch
+ * length and the number of epochs before an unused entry is
+ * evicted must be chosen so that all, or almost all, of the
+ * working set will be accessed before the limit is reached.
+ *
+ * Epoch markers only appear in the LRU list, never in
+ * the index or slist. While they are of type
+ * H5C__EPOCH_MARKER_TYPE, and have associated class
+ * functions, these functions should never be called.
+ *
+ * The addr fields of these instances of H5C_cache_entry_t
+ * are set to the index of the instance in the epoch_markers
+ * array, the size is set to 0, and the type field points
+ * to the constant structure epoch_marker_class defined
+ * in H5C.c. The next and prev fields are used as usual
+ * to link the entry into the LRU list.
+ *
+ * All other fields are unused.
+ *
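+ * As a minimal sketch (illustration only, not the actual library code)
+ * of the ring buffer bookkeeping described under epoch_marker_ringbuf
+ * above, with rb_size equal to H5C__MAX_EPOCH_MARKERS + 1:
+ *
+ *     To append the index i of a newly inserted marker at the tail:
+ *
+ *         last = (last + 1) % rb_size;
+ *         ringbuf[last] = i;
+ *         size++;
+ *
+ *     To remove the index of the oldest marker from the head:
+ *
+ *         i = ringbuf[first];
+ *         first = (first + 1) % rb_size;
+ *         size--;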
+ *
+ * Cache hit rate collection fields:
+ *
+ * We supply the current cache hit rate on request, so we must keep a
+ * simple cache hit rate computation regardless of whether statistics
+ * collection is enabled. The following fields support this capability.
+ *
+ * cache_hits: Number of cache hits since the last time the cache hit
+ * rate statistics were reset. Note that when automatic cache
+ * re-sizing is enabled, this field will be reset every automatic
+ * resize epoch.
+ *
+ * cache_accesses: Number of times the cache has been accessed
+ * since the last time the cache hit rate statistics
+ * were reset. Note that when automatic cache re-sizing is enabled,
+ * this field will be reset every automatic resize epoch.
+ *
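+ * For illustration (not part of the header), the hit rate reported on
+ * request can be computed from these two fields as:
+ *
+ *     hit_rate = (cache_accesses > 0)
+ *                ? ((double)cache_hits / (double)cache_accesses)
+ *                : 0.0;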
+ *
+ * Statistics collection fields:
+ *
+ * When enabled, these fields are used to collect statistics as described
+ * below. The first set are collected only when H5C_COLLECT_CACHE_STATS
+ * is true.
+ *
+ * hits: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type id
+ * equal to the array index has been in cache when requested in
+ * the current epoch.
+ *
+ * misses: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type id
+ * equal to the array index has not been in cache when
+ * requested in the current epoch.
+ *
+ * write_protects: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
+ * cells are used to record the number of times an entry with
+ * type id equal to the array index has been write protected
+ * in the current epoch.
+ *
+ * Observe that (hits + misses) = (write_protects + read_protects).
+ *
+ * read_protects: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
+ * cells are used to record the number of times an entry with
+ * type id equal to the array index has been read protected in
+ * the current epoch.
+ *
+ * Observe that (hits + misses) = (write_protects + read_protects).
+ *
+ * max_read_protects: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * The cells are used to record the maximum number of simultaneous read
+ * protects on any entry with type id equal to the array index
+ * in the current epoch.
+ *
+ * insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type
+ * id equal to the array index has been inserted into the
+ * cache in the current epoch.
+ *
+ * pinned_insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * The cells are used to record the number of times an entry
+ * with type id equal to the array index has been inserted
+ * pinned into the cache in the current epoch.
+ *
+ * clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times a dirty entry with type
+ * id equal to the array index has been cleared in the current
+ * epoch.
+ *
+ * flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type id
+ * equal to the array index has been written to disk in the
+ * current epoch.
+ *
+ * evictions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type id
+ * equal to the array index has been evicted from the cache in
+ * the current epoch.
+ *
+ * take_ownerships: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
+ * cells are used to record the number of times an entry with
+ * type id equal to the array index has been removed from the
+ * cache via the H5C__TAKE_OWNERSHIP_FLAG in the current epoch.
+ *
+ * moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type
+ * id equal to the array index has been moved in the current
+ * epoch.
+ *
+ * entry_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * The cells are used to record the number of times an entry
+ * with type id equal to the array index has been moved
+ * during its pre-serialize callback in the current epoch.
+ *
+ * cache_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * The cells are used to record the number of times an entry
+ * with type id equal to the array index has been moved
+ * during a cache flush in the current epoch.
+ *
+ * pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type
+ * id equal to the array index has been pinned in the current
+ * epoch.
+ *
+ * unpins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type
+ * id equal to the array index has been unpinned in the current
+ * epoch.
+ *
+ * dirty_pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the number of times an entry with type
+ * id equal to the array index has been marked dirty while pinned
+ * in the current epoch.
+ *
+ * pinned_flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
+ * cells are used to record the number of times an entry
+ * with type id equal to the array index has been flushed while
+ * pinned in the current epoch.
+ *
+ * pinned_clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
+ * cells are used to record the number of times an entry
+ * with type id equal to the array index has been cleared while
+ * pinned in the current epoch.
+ *
+ * size_increases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * The cells are used to record the number of times an entry
+ * with type id equal to the array index has increased in
+ * size in the current epoch.
+ *
+ * size_decreases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * The cells are used to record the number of times an entry
+ * with type id equal to the array index has decreased in
+ * size in the current epoch.
+ *
+ * entry_flush_size_changes: Array of int64 of length
+ * H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record
+ * the number of times an entry with type id equal to the
+ * array index has changed size while in its pre-serialize
+ * callback.
+ *
+ * cache_flush_size_changes: Array of int64 of length
+ * H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record
+ * the number of times an entry with type id equal to the
+ * array index has changed size during a cache flush.
+ *
+ * total_ht_insertions: Number of times entries have been inserted into the
+ * hash table in the current epoch.
+ *
+ * total_ht_deletions: Number of times entries have been deleted from the
+ * hash table in the current epoch.
+ *
+ * successful_ht_searches: int64 containing the total number of successful
+ * searches of the hash table in the current epoch.
+ *
+ * total_successful_ht_search_depth: int64 containing the total number of
+ * entries other than the targets examined in successful
+ * searches of the hash table in the current epoch.
+ *
+ * failed_ht_searches: int64 containing the total number of unsuccessful
+ * searches of the hash table in the current epoch.
+ *
+ * total_failed_ht_search_depth: int64 containing the total number of
+ * entries examined in unsuccessful searches of the hash
+ * table in the current epoch.
+ *
+ * max_index_len: Largest value attained by the index_len field in the
+ * current epoch.
+ *
+ * max_index_size: Largest value attained by the index_size field in the
+ * current epoch.
+ *
+ * max_clean_index_size: Largest value attained by the clean_index_size field
+ * in the current epoch.
+ *
+ * max_dirty_index_size: Largest value attained by the dirty_index_size field
+ * in the current epoch.
+ *
+ * max_slist_len: Largest value attained by the slist_len field in the
+ * current epoch.
+ *
+ * max_slist_size: Largest value attained by the slist_size field in the
+ * current epoch.
+ *
+ * max_pl_len: Largest value attained by the pl_len field in the
+ * current epoch.
+ *
+ * max_pl_size: Largest value attained by the pl_size field in the
+ * current epoch.
+ *
+ * max_pel_len: Largest value attained by the pel_len field in the
+ * current epoch.
+ *
+ * max_pel_size: Largest value attained by the pel_size field in the
+ * current epoch.
+ *
+ * calls_to_msic: Total number of calls to H5C_make_space_in_cache
+ *
+ * total_entries_skipped_in_msic: Number of clean entries skipped while
+ * enforcing the min_clean_fraction in H5C_make_space_in_cache().
+ *
+ * total_entries_scanned_in_msic: Number of entries scanned while
+ * enforcing the min_clean_fraction in H5C_make_space_in_cache().
+ *
+ * max_entries_skipped_in_msic: Maximum number of clean entries skipped
+ * in any one call to H5C_make_space_in_cache().
+ *
+ * max_entries_scanned_in_msic: Maximum number of entries scanned over
+ * in any one call to H5C_make_space_in_cache().
+ *
+ * entries_scanned_to_make_space: Number of entries scanned only when looking
+ * for entries to evict in order to make space in cache.
+ *
+ *
+ * As entries are now capable of moving, loading, dirtying, and deleting
+ * other entries in their pre_serialize and serialize callbacks, it has
+ * been necessary to insert code to restart scans of lists so as to avoid
+ * improper behavior if the next entry in the list is the target of one of
+ * these operations.
+ *
+ * The following fields are used to count such occurrences. They are used
+ * both in tests (to verify that the scan has been restarted), and to
+ * obtain estimates of how frequently these restarts occur.
+ *
+ * slist_scan_restarts: Number of times a scan of the slist (that contains
+ * calls to H5C_flush_single_entry()) has been restarted to
+ * avoid potential issues with change of status of the next
+ * entry in the scan.
+ *
+ * LRU_scan_restarts: Number of times a scan of the LRU list (that contains
+ * calls to H5C_flush_single_entry()) has been restarted to
+ * avoid potential issues with change of status of the next
+ * entry in the scan.
+ *
+ * hash_bucket_scan_restarts: Number of times a scan of a hash bucket list
+ * (that contains calls to H5C_flush_single_entry()) has been
+ * restarted to avoid potential issues with change of status
+ * of the next entry in the scan.
+ *
+ * The remaining stats are collected only when both H5C_COLLECT_CACHE_STATS
+ * and H5C_COLLECT_CACHE_ENTRY_STATS are true.
+ *
+ * max_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the maximum number of times any single
+ * entry with type id equal to the array index has been
+ * accessed in the current epoch.
+ *
+ * min_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the minimum number of times any single
+ * entry with type id equal to the array index has been
+ * accessed in the current epoch.
+ *
+ * max_clears: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the maximum number of times any single
+ * entry with type id equal to the array index has been cleared
+ * in the current epoch.
+ *
+ * max_flushes: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the maximum number of times any single
+ * entry with type id equal to the array index has been
+ * flushed in the current epoch.
+ *
+ * max_size: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the maximum size of any single entry
+ * with type id equal to the array index that has resided in
+ * the cache in the current epoch.
+ *
+ * max_pins: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * are used to record the maximum number of times that any single
+ * entry with type id equal to the array index has been
+ * marked as pinned in the cache in the current epoch.
+ *
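+ * For illustration (not part of the header), all of the per-type arrays
+ * above are indexed by the entry's class id. A cache hit on an entry
+ * of type type_id, for example, is recorded along the following lines
+ * (the actual updates are wrapped in macros elsewhere in this package):
+ *
+ * #if H5C_COLLECT_CACHE_STATS
+ *     (cache_ptr->hits[type_id])++;
+ * #endif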
+ *
+ * Fields supporting testing:
+ *
+ * prefix Array of char used to prefix debugging output. The
+ * field is intended to allow marking of output with
+ * the process's MPI rank.
+ *
+ * get_entry_ptr_from_addr_counter: Counter used to track the number of
+ * times the H5C_get_entry_ptr_from_addr() function has been
+ * called successfully. This field is only defined when
+ * NDEBUG is not #defined.
+ *
+ ****************************************************************************/
+struct H5C_t {
+ uint32_t magic;
+ hbool_t flush_in_progress;
+ FILE * trace_file_ptr;
+ void * aux_ptr;
+ int32_t max_type_id;
+ const char * (* type_name_table_ptr);
+ size_t max_cache_size;
+ size_t min_clean_size;
+ H5C_write_permitted_func_t check_write_permitted;
+ hbool_t write_permitted;
+ H5C_log_flush_func_t log_flush;
+ hbool_t evictions_enabled;
+
+ /* Fields for maintaining [hash table] index of entries */
+ int32_t index_len;
+ size_t index_size;
+ size_t clean_index_size;
+ size_t dirty_index_size;
+ H5C_cache_entry_t * (index[H5C__HASH_TABLE_LEN]);
+
+ /* Fields to detect entries removed during scans */
+ int64_t entries_removed_counter;
+ H5C_cache_entry_t * last_entry_removed_ptr;
+
+ /* Field to disable tag validation */
+ hbool_t ignore_tags;
+
+ /* Fields for maintaining list of in-order entries, for flushing */
+ hbool_t slist_changed;
+ hbool_t slist_change_in_pre_serialize;
+ hbool_t slist_change_in_serialize;
+ int32_t slist_len;
+ size_t slist_size;
+ H5SL_t * slist_ptr;
+ int32_t num_last_entries;
+#if H5C_DO_SANITY_CHECKS
+ int64_t slist_len_increase;
+ int64_t slist_size_increase;
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ /* Fields for tracking protected entries */
+ int32_t pl_len;
+ size_t pl_size;
+ H5C_cache_entry_t * pl_head_ptr;
+ H5C_cache_entry_t * pl_tail_ptr;
+
+ /* Fields for tracking pinned entries */
+ int32_t pel_len;
+ size_t pel_size;
+ H5C_cache_entry_t * pel_head_ptr;
+ H5C_cache_entry_t * pel_tail_ptr;
+
+ /* Fields for complete LRU list of entries */
+ int32_t LRU_list_len;
+ size_t LRU_list_size;
+ H5C_cache_entry_t * LRU_head_ptr;
+ H5C_cache_entry_t * LRU_tail_ptr;
+
+ /* Fields for clean LRU list of entries */
+ int32_t cLRU_list_len;
+ size_t cLRU_list_size;
+ H5C_cache_entry_t * cLRU_head_ptr;
+ H5C_cache_entry_t * cLRU_tail_ptr;
+
+ /* Fields for dirty LRU list of entries */
+ int32_t dLRU_list_len;
+ size_t dLRU_list_size;
+ H5C_cache_entry_t * dLRU_head_ptr;
+ H5C_cache_entry_t * dLRU_tail_ptr;
+
+ /* Fields for automatic cache size adjustment */
+ hbool_t size_increase_possible;
+ hbool_t flash_size_increase_possible;
+ size_t flash_size_increase_threshold;
+ hbool_t size_decrease_possible;
+ hbool_t resize_enabled;
+ hbool_t cache_full;
+ hbool_t size_decreased;
+ H5C_auto_size_ctl_t resize_ctl;
+
+ /* Fields for epoch markers used in automatic cache size adjustment */
+ int32_t epoch_markers_active;
+ hbool_t epoch_marker_active[H5C__MAX_EPOCH_MARKERS];
+ int32_t epoch_marker_ringbuf[H5C__MAX_EPOCH_MARKERS+1];
+ int32_t epoch_marker_ringbuf_first;
+ int32_t epoch_marker_ringbuf_last;
+ int32_t epoch_marker_ringbuf_size;
+ H5C_cache_entry_t epoch_markers[H5C__MAX_EPOCH_MARKERS];
+
+ /* Fields for cache hit rate collection */
+ int64_t cache_hits;
+ int64_t cache_accesses;
+
+#if H5C_COLLECT_CACHE_STATS
+ /* stats fields */
+ int64_t hits[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t misses[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t write_protects[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t read_protects[H5C__MAX_NUM_TYPE_IDS + 1];
+ int32_t max_read_protects[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t insertions[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t pinned_insertions[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t clears[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t flushes[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t evictions[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t take_ownerships[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t moves[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t entry_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t cache_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t pins[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t unpins[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t dirty_pins[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t pinned_flushes[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t pinned_clears[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t size_increases[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t size_decreases[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t entry_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t cache_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1];
+
+ /* Fields for hash table operations */
+ int64_t total_ht_insertions;
+ int64_t total_ht_deletions;
+ int64_t successful_ht_searches;
+ int64_t total_successful_ht_search_depth;
+ int64_t failed_ht_searches;
+ int64_t total_failed_ht_search_depth;
+ int32_t max_index_len;
+ size_t max_index_size;
+ size_t max_clean_index_size;
+ size_t max_dirty_index_size;
+
+ /* Fields for in-order skip list */
+ int32_t max_slist_len;
+ size_t max_slist_size;
+
+ /* Fields for protected entry list */
+ int32_t max_pl_len;
+ size_t max_pl_size;
+
+ /* Fields for pinned entry list */
+ int32_t max_pel_len;
+ size_t max_pel_size;
+
+ /* Fields for tracking 'make space in cache' (msic) operations */
+ int64_t calls_to_msic;
+ int64_t total_entries_skipped_in_msic;
+ int64_t total_entries_scanned_in_msic;
+ int32_t max_entries_skipped_in_msic;
+ int32_t max_entries_scanned_in_msic;
+ int64_t entries_scanned_to_make_space;
+
+ /* Fields for tracking skip list scan restarts */
+ int64_t slist_scan_restarts;
+ int64_t LRU_scan_restarts;
+ int64_t hash_bucket_scan_restarts;
+
+#if H5C_COLLECT_CACHE_ENTRY_STATS
+ int32_t max_accesses[H5C__MAX_NUM_TYPE_IDS + 1];
+ int32_t min_accesses[H5C__MAX_NUM_TYPE_IDS + 1];
+ int32_t max_clears[H5C__MAX_NUM_TYPE_IDS + 1];
+ int32_t max_flushes[H5C__MAX_NUM_TYPE_IDS + 1];
+ size_t max_size[H5C__MAX_NUM_TYPE_IDS + 1];
+ int32_t max_pins[H5C__MAX_NUM_TYPE_IDS + 1];
+#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ char prefix[H5C__PREFIX_LEN];
+
+#ifndef NDEBUG
+ int64_t get_entry_ptr_from_addr_counter;
+#endif /* NDEBUG */
+};
+
+/*****************************/
+/* Package Private Variables */
+/*****************************/
+
+
+/******************************/
+/* Package Private Prototypes */
+/******************************/
+
+
#endif /* _H5Cpkg_H */
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index c9679f4..a9ffb70 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -36,10 +36,34 @@
#include "H5private.h" /* Generic Functions */
#include "H5Fprivate.h" /* File access */
+/**************************/
+/* Library Private Macros */
+/**************************/
-#define H5C_DO_SANITY_CHECKS 0
+#ifndef NDEBUG
+#define H5C_DO_SANITY_CHECKS 1
+#define H5C_DO_SLIST_SANITY_CHECKS 0
#define H5C_DO_TAGGING_SANITY_CHECKS 1
#define H5C_DO_EXTREME_SANITY_CHECKS 0
+#else /* NDEBUG */
+/* With rare exceptions, the following defines should be set
+ * to 0 if NDEBUG is defined.
+ */
+#define H5C_DO_SANITY_CHECKS 0
+#define H5C_DO_SLIST_SANITY_CHECKS 0
+#define H5C_DO_TAGGING_SANITY_CHECKS 0
+#define H5C_DO_EXTREME_SANITY_CHECKS 0
+#endif /* NDEBUG */
+
+/* Note: The memory sanity checks aren't going to work until I/O filters are
+ * changed to call a particular alloc/free routine for their buffers,
+ * because the H5AC__SERIALIZE_RESIZED_FLAG set by the fractal heap
+ * direct block serialize callback calls H5Z_pipeline(). When the I/O
+ * filters are changed, then we should implement "cache image alloc/free"
+ * routines that the fractal heap direct block (and global heap) serialize
+ * calls can use when resizing (and re-allocating) their image in the
+ * cache. -QAK */
+#define H5C_DO_MEMORY_SANITY_CHECKS 0
/* This sanity checking constant was picked out of the air. Increase
* or decrease it if appropriate. Its purposes is to detect corrupt
@@ -50,9 +74,15 @@
#define H5C_MAX_ENTRY_SIZE ((size_t)(32 * 1024 * 1024))
/* H5C_COLLECT_CACHE_STATS controls overall collection of statistics
- * on cache activity. In general, this #define should be set to 0.
+ * on cache activity. In general, this #define should be set to 1 in
+ * debug mode, and 0 in production mode.
*/
+
+#ifndef NDEBUG
+#define H5C_COLLECT_CACHE_STATS 1
+#else /* NDEBUG */
#define H5C_COLLECT_CACHE_STATS 0
+#endif /* NDEBUG */
/* H5C_COLLECT_CACHE_ENTRY_STATS controls collection of statistics
* in individual cache entries.
@@ -61,123 +91,38 @@
* H5C_COLLECT_CACHE_STATS is also defined to true.
*/
#if H5C_COLLECT_CACHE_STATS
-
#define H5C_COLLECT_CACHE_ENTRY_STATS 1
-
#else
-
#define H5C_COLLECT_CACHE_ENTRY_STATS 0
-
#endif /* H5C_COLLECT_CACHE_STATS */
-
#ifdef H5_HAVE_PARALLEL
-
/* we must maintain the clean and dirty LRU lists when we are compiled
* with parallel support.
*/
#define H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS 1
-
#else /* H5_HAVE_PARALLEL */
-
/* The clean and dirty LRU lists don't buy us anything here -- we may
* want them on for testing on occasion, but in general they should be
* off.
*/
#define H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS 0
-
#endif /* H5_HAVE_PARALLEL */
-
-/* Typedef for the main structure for the cache (defined in H5Cpkg.h) */
-
-typedef struct H5C_t H5C_t;
-
-
-/*
- * Class methods pertaining to caching. Each type of cached object will
- * have a constant variable with permanent life-span that describes how
- * to cache the object. That variable will be of type H5C_class_t and
- * have the following required fields...
- *
- * LOAD: Loads an object from disk to memory. The function
- * should allocate some data structure and return it.
- *
- * FLUSH: Writes some data structure back to disk. It would be
- * wise for the data structure to include dirty flags to
- * indicate whether it really needs to be written. This
- * function is also responsible for freeing memory allocated
- * by the LOAD method if the DEST argument is non-zero (by
- * calling the DEST method).
- *
- * DEST: Just frees memory allocated by the LOAD method.
- *
- * CLEAR: Just marks object as non-dirty.
- *
- * NOTIFY: Notify client that an action on an entry has taken/will take
- * place
- *
- * SIZE: Report the size (on disk) of the specified cache object.
- * Note that the space allocated on disk may not be contiguous.
- */
-
-#define H5C_CALLBACK__NO_FLAGS_SET 0x0
-#define H5C_CALLBACK__SIZE_CHANGED_FLAG 0x1
-#define H5C_CALLBACK__MOVED_FLAG 0x2
-
-/* Actions that can be reported to 'notify' client callback */
-typedef enum H5C_notify_action_t {
- H5C_NOTIFY_ACTION_AFTER_INSERT, /* Entry has been added to the cache */
- /* (could be loaded from file with
- * 'protect' call, or inserted
- * with 'set' call)
- */
- H5C_NOTIFY_ACTION_BEFORE_EVICT /* Entry is about to be evicted from cache */
-} H5C_notify_action_t;
-
-typedef void *(*H5C_load_func_t)(H5F_t *f,
- hid_t dxpl_id,
- haddr_t addr,
- void *udata);
-typedef herr_t (*H5C_flush_func_t)(H5F_t *f,
- hid_t dxpl_id,
- hbool_t dest,
- haddr_t addr,
- void *thing,
- unsigned * flags_ptr);
-typedef herr_t (*H5C_dest_func_t)(H5F_t *f,
- void *thing);
-typedef herr_t (*H5C_clear_func_t)(H5F_t *f,
- void *thing,
- hbool_t dest);
-typedef herr_t (*H5C_notify_func_t)(H5C_notify_action_t action,
- void *thing);
-typedef herr_t (*H5C_size_func_t)(const H5F_t *f,
- const void *thing,
- size_t *size_ptr);
-
-typedef struct H5C_class_t {
- int id;
- H5C_load_func_t load;
- H5C_flush_func_t flush;
- H5C_dest_func_t dest;
- H5C_clear_func_t clear;
- H5C_notify_func_t notify;
- H5C_size_func_t size;
-} H5C_class_t;
-
-
-/* Type defintions of call back functions used by the cache as a whole */
-
-typedef herr_t (*H5C_write_permitted_func_t)(const H5F_t *f,
- hid_t dxpl_id,
- hbool_t * write_permitted_ptr);
-
-typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
- haddr_t addr,
- hbool_t was_dirty,
- unsigned flags,
- int type_id);
+/* Flags for cache client class behavior */
+#define H5C__CLASS_NO_FLAGS_SET ((unsigned)0x0)
+#define H5C__CLASS_SPECULATIVE_LOAD_FLAG ((unsigned)0x1)
+#define H5C__CLASS_COMPRESSED_FLAG ((unsigned)0x2)
+/* The following flags may only appear in test code */
+#define H5C__CLASS_NO_IO_FLAG ((unsigned)0x4)
+#define H5C__CLASS_SKIP_READS ((unsigned)0x8)
+#define H5C__CLASS_SKIP_WRITES ((unsigned)0x10)
+
+/* Flags for pre-serialize callback */
+#define H5C__SERIALIZE_NO_FLAGS_SET ((unsigned)0)
+#define H5C__SERIALIZE_RESIZED_FLAG ((unsigned)0x1)
+#define H5C__SERIALIZE_MOVED_FLAG ((unsigned)0x2)
+#define H5C__SERIALIZE_COMPRESSED_FLAG ((unsigned)0x4)
/* Upper and lower limits on cache size. These limits are picked
* out of a hat -- you should be able to change them as necessary.
@@ -187,15 +132,12 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
* upper bound on cache size is rather large for the current hash table
* size.
*/
-
#define H5C__MAX_MAX_CACHE_SIZE ((size_t)(128 * 1024 * 1024))
#define H5C__MIN_MAX_CACHE_SIZE ((size_t)(1024))
-
/* Default max cache size and min clean size are give here to make
* them generally accessable.
*/
-
#define H5C__DEFAULT_MAX_CACHE_SIZE ((size_t)(4 * 1024 * 1024))
#define H5C__DEFAULT_MIN_CLEAN_SIZE ((size_t)(2 * 1024 * 1024))
@@ -205,10 +147,964 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
* array needs 4 levels, plus another 2 levels are needed: one for the layer
* under the extensible array and one for the layer above it).
*/
-
#define H5C__NUM_FLUSH_DEP_HEIGHTS 6
+/* Values for cache entry magic field */
+#define H5C__H5C_CACHE_ENTRY_T_MAGIC 0x005CAC0A
+#define H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC 0xDeadBeef
+
+/* Cache configuration validation definitions */
+#define H5C_RESIZE_CFG__VALIDATE_GENERAL 0x1
+#define H5C_RESIZE_CFG__VALIDATE_INCREMENT 0x2
+#define H5C_RESIZE_CFG__VALIDATE_DECREMENT 0x4
+#define H5C_RESIZE_CFG__VALIDATE_INTERACTIONS 0x8
+#define H5C_RESIZE_CFG__VALIDATE_ALL \
+( \
+ H5C_RESIZE_CFG__VALIDATE_GENERAL | \
+ H5C_RESIZE_CFG__VALIDATE_INCREMENT | \
+ H5C_RESIZE_CFG__VALIDATE_DECREMENT | \
+ H5C_RESIZE_CFG__VALIDATE_INTERACTIONS \
+)
+
+/* Cache configuration versions */
+#define H5C__CURR_AUTO_SIZE_CTL_VER 1
+#define H5C__CURR_AUTO_RESIZE_RPT_FCN_VER 1
+/* Number of epoch markers active */
+#define H5C__MAX_EPOCH_MARKERS 10
+
+/* Default configuration settings */
+#define H5C__DEF_AR_UPPER_THRESHHOLD 0.9999f
+#define H5C__DEF_AR_LOWER_THRESHHOLD 0.9f
+#define H5C__DEF_AR_MAX_SIZE ((size_t)(16 * 1024 * 1024))
+#define H5C__DEF_AR_INIT_SIZE ((size_t)( 1 * 1024 * 1024))
+#define H5C__DEF_AR_MIN_SIZE ((size_t)( 1 * 1024 * 1024))
+#define H5C__DEF_AR_MIN_CLEAN_FRAC 0.5f
+#define H5C__DEF_AR_INCREMENT 2.0f
+#define H5C__DEF_AR_MAX_INCREMENT ((size_t)( 2 * 1024 * 1024))
+#define H5C__DEF_AR_FLASH_MULTIPLE 1.0f
+#define H5C__DEV_AR_FLASH_THRESHOLD 0.25f
+#define H5C__DEF_AR_DECREMENT 0.9f
+#define H5C__DEF_AR_MAX_DECREMENT ((size_t)( 1 * 1024 * 1024))
+#define H5C__DEF_AR_EPCHS_B4_EVICT 3
+#define H5C__DEF_AR_EMPTY_RESERVE 0.05f
+#define H5C__MIN_AR_EPOCH_LENGTH 100
+#define H5C__DEF_AR_EPOCH_LENGTH 50000
+#define H5C__MAX_AR_EPOCH_LENGTH 1000000
+
+/* #defines of flags used in the flags parameters in some of the
+ * cache calls. Note that not all flags are applicable
+ * to all function calls. Flags that don't apply to a particular
+ * function are ignored in that function.
+ *
+ * These flags apply to all function calls:
+ * H5C__NO_FLAGS_SET (generic "no flags set" for all fcn calls)
+ *
+ *
+ * These flags apply to H5C_insert_entry():
+ * H5C__SET_FLUSH_MARKER_FLAG
+ * H5C__PIN_ENTRY_FLAG
+ * H5C__FLUSH_LAST_FLAG ; super block only
+ * H5C__FLUSH_COLLECTIVELY_FLAG ; super block only
+ *
+ * These flags apply to H5C_protect()
+ * H5C__READ_ONLY_FLAG
+ * H5C__FLUSH_LAST_FLAG ; super block only
+ * H5C__FLUSH_COLLECTIVELY_FLAG ; super block only
+ *
+ * These flags apply to H5C_unprotect():
+ * H5C__SET_FLUSH_MARKER_FLAG
+ * H5C__DELETED_FLAG
+ * H5C__DIRTIED_FLAG
+ * H5C__PIN_ENTRY_FLAG
+ * H5C__UNPIN_ENTRY_FLAG
+ * H5C__FREE_FILE_SPACE_FLAG
+ * H5C__TAKE_OWNERSHIP_FLAG
+ *
+ * These flags apply to H5C_expunge_entry():
+ * H5C__FREE_FILE_SPACE_FLAG
+ *
+ * These flags apply to H5C_flush_cache():
+ * H5C__FLUSH_INVALIDATE_FLAG
+ * H5C__FLUSH_CLEAR_ONLY_FLAG
+ * H5C__FLUSH_MARKED_ENTRIES_FLAG
+ * H5C__FLUSH_IGNORE_PROTECTED_FLAG (can't use this flag in combination
+ * with H5C__FLUSH_INVALIDATE_FLAG)
+ *
+ * These flags apply to H5C_flush_single_entry():
+ * H5C__FLUSH_INVALIDATE_FLAG
+ * H5C__FLUSH_CLEAR_ONLY_FLAG
+ * H5C__FLUSH_MARKED_ENTRIES_FLAG
+ * H5C__TAKE_OWNERSHIP_FLAG
+ */
+#define H5C__NO_FLAGS_SET 0x0000
+#define H5C__SET_FLUSH_MARKER_FLAG 0x0001
+#define H5C__DELETED_FLAG 0x0002
+#define H5C__DIRTIED_FLAG 0x0004
+#define H5C__PIN_ENTRY_FLAG 0x0008
+#define H5C__UNPIN_ENTRY_FLAG 0x0010
+#define H5C__FLUSH_INVALIDATE_FLAG 0x0020
+#define H5C__FLUSH_CLEAR_ONLY_FLAG 0x0040
+#define H5C__FLUSH_MARKED_ENTRIES_FLAG 0x0080
+#define H5C__FLUSH_IGNORE_PROTECTED_FLAG 0x0100
+#define H5C__READ_ONLY_FLAG 0x0200
+#define H5C__FREE_FILE_SPACE_FLAG 0x0800
+#define H5C__TAKE_OWNERSHIP_FLAG 0x1000
+#define H5C__FLUSH_LAST_FLAG 0x2000
+#define H5C__FLUSH_COLLECTIVELY_FLAG 0x4000
+
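+/* Example (illustration only, not part of the API): a client that has
+ * dirtied a pinned entry and wishes to unpin it on unprotect would pass
+ *
+ *      (H5C__DIRTIED_FLAG | H5C__UNPIN_ENTRY_FLAG)
+ *
+ * in the flags parameter of the unprotect call. As noted above, flags
+ * that do not apply to a given call are ignored.
+ */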
+
+/****************************/
+/* Library Private Typedefs */
+/****************************/
+
+/* Typedef for the main structure for the cache (defined in H5Cpkg.h) */
+typedef struct H5C_t H5C_t;
+
+
+/***************************************************************************
+ *
+ * Struct H5C_class_t
+ *
+ * Instances of H5C_class_t are used to specify the callback functions
+ * used by the metadata cache for each class of metadata cache entry.
+ * The fields of the structure are discussed below:
+ *
+ * id: Integer field containing the unique ID of the class of metadata
+ * cache entries.
+ *
+ * name: Pointer to a string containing the name of the class of metadata
+ * cache entries.
+ *
+ * mem_type: Instance of H5FD_mem_t, that is used to supply the
+ * mem type passed into H5F_block_read().
+ *
+ * flags: Flags indicating class-specific behavior.
+ *
+ * Whoever created the flags field neglected to document the meanings
+ * of the flags he created. Hence the following discussion of the
+ * H5C__CLASS_SPECULATIVE_LOAD_FLAG and (to a lesser extent)
+ * H5C__CLASS_COMPRESSED_FLAG should be viewed with suspicion,
+ * as the meanings are divined from the source code, and thus may be
+ * inaccurate. Please correct any errors you find.
+ *
+ * Possible flags are:
+ *
+ * H5C__CLASS_NO_FLAGS_SET: No special processing.
+ *
+ * H5C__CLASS_SPECULATIVE_LOAD_FLAG: This flag appears to be used
+ * only in H5C_load_entry(). When it is set, entries are
+ * permitted to change their sizes on the first attempt
+ * to load.
+ *
+ * If the new size is larger than the old, the read buffer
+ * is reallocated to the new size, loaded from file, and the
+ * deserialize routine is called a second time on the
+ * new buffer. The entry returned by the first call to
+ * the deserialize routine is discarded (via the free_icr
+ * call) after the new size is retrieved (via the image_len
+ * call). Note that the new size is used as the size of the
+ * entry in the cache.
+ *
+ * If the new size is smaller than the old, no new loads
+ * or deserializations are performed, but the new size becomes
+ * the size of the entry in the cache.
+ *
+ * When this flag is set, an attempt to read past the
+ * end of file is permitted. In this case, if the size
+ * returned by the get_load_size callback would result in a
+ * read past the end of file, the size is truncated to
+ * avoid this, and processing proceeds as normal.
+ *
+ * H5C__CLASS_COMPRESSED_FLAG: This flag indicates that the entry
+ * may be compressed -- i.e. its on disk image is run through
+ * filters on the way to and from disk. Thus the uncompressed
+ * (or unfiltered) size of the entry may be different from the
+ * size of the entry in file.
+ *
+ * This has the following implications:
+ *
+ * On load, uncompressed size and load size may be different.
+ * Presumably, load size will be smaller than uncompressed
+ * size, but there is no requirement for this in the code
+ * (but note that I have inserted an assertion to this effect,
+ * which has not been triggered to date).
+ *
+ * On insertion, compressed (AKA filtered, AKA on disk) size
+ * is unknown, as the entry has yet to be run through filters.
+ * Compressed size is computed whenever the entry is
+ * written (or the image is updated -- not relevant until
+ * journaling is brought back).
+ *
+ * On dirty (of a clean entry), compressed (AKA filtered,
+ * AKA on disk) size becomes unknown. Thus, compressed size
+ * must be computed by the pre-serialize callback before the
+ * entry may be written.
+ *
+ * Once the compressed size becomes unknown, it remains so
+ * until the on disk image is constructed.
+ *
+ * Observe that the cache needs to know the size of the entry
+ * for space allocation purposes. Since the compressed size
+ * can change or become unknown, it uses the uncompressed
+ * size which may change, but which should always be known.
+ * The compressed size is used only for journaling and disk I/O.
+ *
+ * While there is no logical reason why they could not be
+ * combined, due to absence of need and for simplicity of code,
+ * the cache does not permit both the
+ * H5C__CLASS_COMPRESSED_FLAG and the
+ * H5C__CLASS_SPECULATIVE_LOAD_FLAG to be set in the same
+ * instance of H5C_class_t.
+ *
+ * The following flags may only appear in test code.
+ *
+ * H5C__CLASS_NO_IO_FLAG: This flag is intended only for use in test
+ * code. When it is set, any attempt to load an entry of
+ * the type with this flag set will trigger an assertion
+ * failure, and any flush of an entry with this flag set
+ * will not result in any write to file.
+ *
+ * H5C__CLASS_SKIP_READS: This flag is intended only for use in test
+ * code. When it is set, reads on load will be skipped,
+ * and an uninitialized buffer will be passed to the
+ * deserialize function.
+ *
+ * H5C__CLASS_SKIP_WRITES: This flag is intended only for use in test
+ * code. When it is set, writes of buffers prepared by the
+ * serialize callback will be skipped.
+ *
+ * GET_LOAD_SIZE: Pointer to the 'get load size' function.
+ *
+ * This function must be able to determine the size of the disk image of
+ * a metadata cache entry, given the 'udata' that will be passed to the
+ * 'deserialize' callback.
+ *
+ * Note that if either the H5C__CLASS_SPECULATIVE_LOAD_FLAG or
+ * the H5C__CLASS_COMPRESSED_FLAG is set, the disk image size
+ * returned by this callback is either a first guess (if the
+ * H5C__CLASS_SPECULATIVE_LOAD_FLAG is set) or (if the
+ * H5C__CLASS_COMPRESSED_FLAG is set), the exact on disk size
+ * of the entry whether it has been run through filters or not.
+ * In all other cases, the value returned should be the correct
+ * uncompressed size of the entry.
+ *
+ * The typedef for the get_load_size callback is as follows:
+ *
+ * typedef herr_t (*H5C_get_load_size_func_t)(void *udata_ptr,
+ * size_t *image_len_ptr);
+ *
+ * The parameters of the get_load_size callback are as follows:
+ *
+ * udata_ptr: Pointer to user data provided in the protect call, which
+ * will also be passed through to the deserialize callback.
+ *
+ * image_len_ptr: Pointer to the location in which the length in bytes
+ * of the in file image to be deserialized is to be returned.
+ *
+ * This value is used by the cache to determine the size of
+ * the disk image for the metadata, in order to read the disk
+ * image from the file.
+ *
+ * Processing in the get_load_size function should proceed as follows:
+ *
+ * If successful, the function will place the length of the on disk
+ * image associated with supplied user data in *image_len_ptr, and
+ * then return SUCCEED.
+ *
+ * On failure, the function must return FAIL and push error information
+ * onto the error stack with the error API routines, without modifying
+ * the value pointed to by the image_len_ptr.
+ *
+ *
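+ * For illustration only, a minimal get_load_size callback for a
+ * hypothetical client might look as follows. The H5X__ function name,
+ * the H5X_cache_ud_t user data type, and the H5X_HEADER_SIZE macro are
+ * invented for this sketch:
+ *
+ *     static herr_t
+ *     H5X__cache_get_load_size(void *_udata, size_t *image_len)
+ *     {
+ *         H5X_cache_ud_t *udata = (H5X_cache_ud_t *)_udata;
+ *
+ *         HDassert(udata);
+ *         HDassert(image_len);
+ *
+ *         *image_len = H5X_HEADER_SIZE(udata->f);
+ *
+ *         return(SUCCEED);
+ *     }
+ *
+ * On failure, a real callback would push an error and return FAIL,
+ * as described above.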
+ * DESERIALIZE: Pointer to the deserialize function.
+ *
+ * This function must be able to read a buffer containing the on disk
+ * image of a metadata cache entry, allocate and load the equivalent
+ * in core representation, and return a pointer to that representation.
+ *
+ * The typedef for the deserialize callback is as follows:
+ *
+ * typedef void *(*H5C_deserialize_func_t)(const void * image_ptr,
+ * size_t len,
+ * void * udata_ptr,
+ * boolean * dirty_ptr);
+ *
+ * The parameters of the deserialize callback are as follows:
+ *
+ * image_ptr: Pointer to a buffer of length len containing the
+ * contents of the file starting at addr and continuing
+ * for len bytes.
+ *
+ * len: Length in bytes of the in file image to be deserialized.
+ *
+ * This parameter is supplied mainly for sanity checking.
+ * Sanity checks should be performed when compiled in debug
+ * mode, but the parameter may be unused when compiled in
+ * production mode.
+ *
+ * udata_ptr: Pointer to user data provided in the protect call, which
+ * must be passed through to the deserialize callback.
+ *
+ * dirty_ptr: Pointer to boolean which the deserialize function
+ * must use to mark the entry dirty if it has to modify
+ * the entry to clean up file corruption left over from
+ * an old bug in the HDF5 library.
+ *
+ * Processing in the deserialize function should proceed as follows:
+ *
+ * If the image contains valid data, and is of the correct length,
+ * the deserialize function must allocate space for an in core
+ * representation of that data, load the contents of the image into
+ * the space allocated for the in core representation, and return
+ * a pointer to the in core representation. Observe that an
+ * instance of H5C_cache_entry_t must be the first item in this
+ * representation. The cache will initialize it after the callback
+ * returns.
+ *
+ * Note that the structure of the in core representation is otherwise
+ * up to the cache client. All that is required is that the pointer
+ * returned be sufficient for the client's purposes when it is returned
+ * on a protect call.
+ *
+ * If the deserialize function has to clean up file corruption
+ * left over from an old bug in the HDF5 library, it must set
+ * *dirty_ptr to TRUE. If it doesn't, no action is needed as
+ * *dirty_ptr will be set to FALSE before the deserialize call.
+ *
+ * If the operation fails for any reason (i.e. bad data in buffer, bad
+ * buffer length, malloc failure, etc.) the function must return NULL and
+ * push error information on the error stack with the error API routines.
+ *
+ * Exceptions to the above:
+ *
+ * If the H5C__CLASS_SPECULATIVE_LOAD_FLAG is set, the buffer supplied
+ * to the function need not be correct on the first invocation of the
+ * callback in any single attempt to load the entry.
+ *
+ * In this case, if the buffer is larger than necessary, the function
+ * should load the entry as described above and not flag an error due
+ * to the oversized buffer. The cache will correct its mis-apprehension
+ * of the entry size with a subsequent call to the image_len callback.
+ *
+ * If the buffer is too small, and this is the first deserialize call
+ * in the entry load operation, the function should not flag an error.
+ * Instead, it must compute the correct size of the entry, allocate an
+ * in core representation and initialize it to the extent that an
+ * immediate call to the image len callback will return the correct
+ * image size.
+ *
+ * In this case, when the deserialize callback returns, the cache will
+ * call the image length callback, realize that the supplied buffer was
+ * too small, discard the returned in core representation, allocate
+ * and load a new buffer of the correct size from file, and then call
+ * the deserialize callback again.
+ *
+ * If the H5C__CLASS_COMPRESSED_FLAG is set, exceptions are as per the
+ * H5C__CLASS_SPECULATIVE_LOAD_FLAG, save that only oversized buffers
+ * are permitted.
+ *
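+ * For illustration only, a minimal deserialize callback for a
+ * hypothetical client might look as follows. The H5X__ names, the toy
+ * 8 byte on disk format (a 4 byte version followed by a 4 byte value),
+ * and the H5X_thing_t in core type are invented for this sketch; note
+ * that the instance of H5C_cache_entry_t is the first field of the
+ * in core representation, as required above:
+ *
+ *     typedef struct H5X_thing_t {
+ *         H5C_cache_entry_t cache_info;
+ *         uint32_t          version;
+ *         uint32_t          value;
+ *     } H5X_thing_t;
+ *
+ *     static void *
+ *     H5X__cache_deserialize(const void *_image, size_t len,
+ *         void *udata, hbool_t *dirty)
+ *     {
+ *         const uint8_t *image = (const uint8_t *)_image;
+ *         H5X_thing_t *thing = NULL;
+ *
+ *         HDassert(image);
+ *         HDassert(len >= 8);
+ *         HDassert(dirty);
+ *
+ *         if(NULL == (thing = (H5X_thing_t *)H5MM_calloc(sizeof(H5X_thing_t))))
+ *             return(NULL);
+ *
+ *         UINT32DECODE(image, thing->version);
+ *         UINT32DECODE(image, thing->value);
+ *
+ *         return(thing);
+ *     }
+ *
+ * A real callback would also push errors onto the error stack on
+ * failure, as described above.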
+ *
+ * IMAGE_LEN: Pointer to the image length callback.
+ *
+ * This callback exists primarily to support
+ * H5C__CLASS_SPECULATIVE_LOAD_FLAG and H5C__CLASS_COMPRESSED_FLAG
+ * discussed above, although it is also used to obtain the size of
+ * newly inserted entries.
+ *
+ * In the case of the H5C__CLASS_SPECULATIVE_LOAD_FLAG, it is used to
+ * allow the client to change the size of an entry in the deserialize
+ * callback.
+ *
+ * For the H5C__CLASS_COMPRESSED_FLAG, it is used to allow the client
+ * to indicate whether the entry is compressed (i.e. whether entries
+ * are run through filters) and if so, to report both the uncompressed
+ * and the compressed entry size (i.e. the actual on disk size after
+ * the entry has been run through filters) if that value is known.
+ *
+ * The callback is also used in H5C_insert_entry() to obtain the
+ * size of the newly inserted entry.
+ *
+ * The typedef for the image_len callback is as follows:
+ *
+ * typedef herr_t (*H5C_image_len_func_t)(void *thing,
+ * size_t *image_len_ptr,
+ * hbool_t *compressed_ptr,
+ * size_t *compressed_image_len_ptr);
+ *
+ * The parameters of the image_len callback are as follows:
+ *
+ * thing: Pointer to the in core representation of the entry.
+ *
+ * image_len_ptr: Pointer to size_t in which the callback will return
+ * the length (in bytes) of the cache entry.
+ *
+ * If the H5C__CLASS_COMPRESSED_FLAG is not set in the
+ * associated instance of H5C_class_t, or if the flag is
+ * set, and the callback sets *compressed_ptr to FALSE,
+ * this size is the actual size of the entry on disk.
+ *
+ * Otherwise, this size is the uncompressed size of the
+ * entry -- which the cache will use for all purposes OTHER
+ * than journal writes and disk I/O.
+ *
+ * compressed_ptr: Pointer to a boolean flag indicating whether
+ * the cache entry will be compressed / uncompressed on
+ * disk writes / reads.
+ *
+ * If the H5C__CLASS_COMPRESSED_FLAG is not set in the
+ * associated instance of H5C_class_t, *compressed_ptr
+ * must be set to FALSE.
+ *
+ * If the H5C__CLASS_COMPRESSED_FLAG is set in the
+ * associated instance of H5C_class_t, and filters are
+ * not enabled, *compressed_ptr must be set to FALSE.
+ *
+ * If the H5C__CLASS_COMPRESSED_FLAG is set in the
+ * associated instance of H5C_class_t, and filters are
+ * enabled, the callback must set *compressed_ptr to TRUE.
+ *
+ * Note that *compressed_ptr will always be set to FALSE
+ * by the caller prior to invocation of the callback. Thus
+ * callbacks for clients that don't set the
+ * H5C__CLASS_COMPRESSED_FLAG can ignore this parameter.
+ *
+ * compressed_image_len_ptr: Pointer to size_t in which the callback
+ * may return the length (in bytes) of the compressed on
+ * disk image of the entry, or the uncompressed size if the
+ * compressed size has not yet been calculated.
+ *
+ * Since computing the compressed image len is expensive,
+ * the callback should only report the most recently computed
+ * value -- which will typically be incorrect if the entry
+ * is dirty.
+ *
+ * If *compressed_ptr is set to FALSE, *compressed_image_len_ptr
+ * should be set to zero. However, as *compressed_image_len_ptr
+ * will be initialized to zero prior to the call, the callback
+ * need not modify it if the H5C__CLASS_COMPRESSED_FLAG is
+ * not set.
+ *
+ * If the H5C__CLASS_COMPRESSED_FLAG is not set in the associated
+ * instance of H5C_class_t, processing in the image_len function
+ * should proceed as follows:
+ *
+ * If successful, the function will place the length of the on disk
+ * image associated with the in core representation provided in the
+ * thing parameter in *image_len_ptr, and then return SUCCEED. Since
+ * *compressed_ptr and *compressed_image_len_ptr will be initialized to
+ * FALSE and zero respectively before the call, the callback need not
+ * modify these values, and may declare the associated parameters as
+ * UNUSED.
+ *
+ * If the H5C__CLASS_COMPRESSED_FLAG is set in the associated
+ * instance of H5C_class_t, processing in the image_len function
+ * should proceed as follows:
+ *
+ * If successful, the function will place the uncompressed length of
+ * the on disk image associated with the in core representation
+ * provided in the thing parameter in *image_len_ptr. If filters
+ * are not enabled for the entry, it will set *compressed_ptr to FALSE,
+ * and *compressed_image_len_ptr to zero. If filters are enabled,
+ * it will set *compressed_ptr to TRUE. In this case, it must set
+ * *compressed_image_len_ptr equal to the last computed compressed
+ * size, or to the uncompressed size if that value
+ * has yet to be computed. In all cases, it will return SUCCEED if
+ * successful.
+ *
+ * In either case, if the function fails, it must return FAIL and
+ * push error information onto the error stack with the error API
+ * routines, and return without modifying the values pointed to by
+ * the image_len_ptr, compressed_ptr, and compressed_image_len_ptr
+ * parameters.
+ *
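+ * For illustration only, a minimal image_len callback for an
+ * uncompressed, fixed size entry of the hypothetical H5X client used in
+ * the sketches above (8 bytes, matching the toy format) might look like:
+ *
+ *     static herr_t
+ *     H5X__cache_image_len(void *thing, size_t *image_len,
+ *         hbool_t *compressed, size_t *compressed_len)
+ *     {
+ *         HDassert(thing);
+ *         HDassert(image_len);
+ *
+ *         *image_len = 8;
+ *
+ *         return(SUCCEED);
+ *     }
+ *
+ * Since the H5C__CLASS_COMPRESSED_FLAG is not set for this hypothetical
+ * class, the compressed and compressed_len parameters are left at the
+ * values (FALSE and zero) to which the cache initializes them.
+ *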
+ * PRE_SERIALIZE: Pointer to the pre-serialize callback.
+ *
+ * The pre-serialize callback is invoked by the metadata cache before
+ * it needs a current on-disk image of the metadata entry, either for
+ * constructing a journal or for flushing the entry to disk.
+ *
+ * If the client needs to change the address or compressed or
+ * uncompressed length of the entry prior to flush, the pre-serialize
+ * callback is responsible for these actions, so that the actual
+ * serialize callback (described below) is only responsible for
+ * serializing the data structure, not moving it on disk or resizing it.
+ *
+ * In addition, the client may use the pre-serialize callback to
+ * ensure that the entry is ready to be flushed -- in particular,
+ * if the entry contains references to other entries that are in
+ * temporary file space, the pre-serialize callback must move those
+ * entries into real file space so that the serialized entry will
+ * contain no invalid data.
+ *
+ * One would think that the base address and length of
+ * the entry's image on disk would be well known.
+ * However, that need not be the case as free space section info
+ * entries will change size (and possibly location) depending on the
+ * number of blocks of free space being managed, and fractal heap
+ * direct blocks can change compressed size (and possibly location)
+ * on serialization if compression is enabled. Similarly, it may
+ * be necessary to move entries from temporary to real file space.
+ *
+ * The pre-serialize callback must report any such changes to the
+ * cache, which must then update its internal structures as needed.
+ *
+ * The typedef for the pre-serialize callback is as follows:
+ *
+ * typedef herr_t (*H5C_pre_serialize_func_t)(const H5F_t *f,
+ * hid_t dxpl_id,
+ * void * thing,
+ * haddr_t addr,
+ * size_t len,
+ * size_t compressed_len,
+ * haddr_t * new_addr_ptr,
+ * size_t * new_len_ptr,
+ * size_t * new_compressed_len_ptr,
+ * unsigned * flags_ptr);
+ *
+ * The parameters of the pre-serialize callback are as follows:
+ *
+ * f: File pointer -- needed if other metadata cache entries
+ * must be modified in the process of serializing the
+ * target entry.
+ *
+ * dxpl_id: dxpl_id passed with the file pointer to the cache, and
+ * passed on to the callback. Necessary as some callbacks
+ * revise the size and location of the target entry, or
+ * possibly other entries on pre-serialize.
+ *
+ * thing: Pointer to void containing the address of the in core
+ * representation of the target metadata cache entry.
+ * This is the same pointer returned by a protect of the
+ * addr and len given above.
+ *
+ * addr: Base address in file of the entry to be serialized.
+ *
+ * This parameter is supplied mainly for sanity checking.
+ * Sanity checks should be performed when compiled in debug
+ * mode, but the parameter may be unused when compiled in
+ * production mode.
+ *
+ * len: Length in bytes of the in file image of the entry to be
+ * serialized. Also the size of the image passed to the
+ * serialize callback (discussed below) unless either that
+ * value is altered by this function, or the entry will be
+ * compressed. In the latter case, the compressed size
+ * of the entry will be reported in *new_compressed_len_ptr.
+ *
+ * This parameter is supplied mainly for sanity checking.
+ * Sanity checks should be performed when compiled in debug
+ * mode, but the parameter may be unused when compiled in
+ * production mode.
+ *
+ * compressed_len: If the entry is to be compressed (i.e. run through
+ * filters) prior to flush, the length in bytes of the last known
+ * compressed size of the entry -- or the uncompressed size
+ * if no such value exists (i.e. the entry has been inserted,
+ * but never flushed). This parameter should be set to zero
+ * in all other cases.
+ *
+ * This parameter is supplied mainly for sanity checking.
+ * Sanity checks should be performed when compiled in debug
+ * mode, but the parameter may be unused when compiled in
+ * production mode.
+ *
+ * new_addr_ptr: Pointer to haddr_t. If the entry is moved by
+ *             the pre-serialize function, the new on disk base address must
+ * be stored in *new_addr_ptr, and the appropriate flag set
+ * in *flags_ptr.
+ *
+ *             If the entry is not moved by the pre-serialize function,
+ * *new_addr_ptr is undefined on pre-serialize callback
+ * return.
+ *
+ * new_len_ptr: Pointer to size_t. If the entry is resized by the
+ *             pre-serialize function, the new length of the on disk image
+ * must be stored in *new_len_ptr, and the appropriate flag set
+ * in *flags_ptr.
+ *
+ * If the entry is not resized by the pre-serialize function,
+ * *new_len_ptr is undefined on pre-serialize callback
+ * return.
+ *
+ * new_compressed_len_ptr: Pointer to size_t. If the image will be
+ * compressed (i.e. run through filters) prior to being
+ * written to disk, the compressed size (in bytes) of the
+ * on disk image must be stored in *new_compressed_len_ptr,
+ * and the appropriate flag set in *flags_ptr.
+ *
+ * flags_ptr: Pointer to an unsigned integer used to return flags
+ *             indicating whether the pre-serialize function resized or moved
+ *             the entry, or computed its compressed size.  If the entry was
+ *             neither resized nor moved, and will not be compressed,
+ *             the pre-serialize function must set *flags_ptr to zero.
+ * H5C__SERIALIZE_RESIZED_FLAG, H5C__SERIALIZE_MOVED_FLAG
+ * and H5C__SERIALIZE_COMPRESSED_FLAG must be set to indicate
+ * a resize, a move, or compression respectively.
+ *
+ * If the H5C__SERIALIZE_RESIZED_FLAG is set, the new length
+ * must be stored in *new_len_ptr.
+ *
+ * If the H5C__SERIALIZE_MOVED_FLAG flag is set, the
+ * new image base address must be stored in *new_addr_ptr.
+ *
+ * If the H5C__SERIALIZE_COMPRESSED_FLAG is set, the
+ * compressed size of the new image must be stored in
+ * *new_compressed_len_ptr.
+ *
+ * Processing in the pre-serialize function should proceed as follows:
+ *
+ * The pre-serialize function must examine the in core representation
+ *             indicated by the thing parameter.  If the pre-serialize function does
+ *             not need to change the size or location of the on-disk image, or
+ *             compute its compressed size, it must set *flags_ptr to zero.
+ *
+ * If the (uncompressed) size of the on-disk image must be changed,
+ * the pre-serialize function must load the length of the new image
+ * into *new_len_ptr, and set the H5C__SERIALIZE_RESIZED_FLAG in
+ * *flags_ptr.
+ *
+ * If the base address of the on disk image must be changed, the
+ * pre-serialize function must set *new_addr_ptr to the new base address,
+ * and set the H5C__SERIALIZE_MOVED_FLAG in *flags_ptr.
+ *
+ *             If the H5C__CLASS_COMPRESSED_FLAG is set in the associated instance
+ * of H5C_class_t, and filters (i.e. compression) are enabled, the
+ * pre-serialize function must compute the compressed size of the
+ * on disk image, and if it has changed, load this value into
+ * *new_compressed_len_ptr, and set H5C__SERIALIZE_COMPRESSED_FLAG in
+ * *flags_ptr.
+ *
+ *             Note that to do this, the pre-serialize function will typically have
+ * to serialize the entry, and run it through the filters to obtain
+ * the compressed size. For efficiency, the compressed image may
+ * be stored to be copied into the supplied buffer by the
+ * serialize callback. Needless to say this is awkward. We may
+ * want to re-work the API for cache clients to simplify this.
+ *
+ * In addition, the pre-serialize callback may perform any other
+ *             processing required before the entry is written to disk.
+ *
+ * If it is successful, the function must return SUCCEED.
+ *
+ * If it fails for any reason, the function must return FAIL and
+ * push error information on the error stack with the error API
+ * routines.
+ *
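+ *             As an illustration only (not something mandated by the
+ *             cache), a client whose entries are never moved, resized, or
+ *             compressed might implement the pre-serialize callback along
+ *             the following lines.  The H5X_foo_t type and the function
+ *             name are hypothetical:
+ *
+ *                 static herr_t
+ *                 H5X__cache_foo_pre_serialize(const H5F_t *f, hid_t dxpl_id,
+ *                     void *thing, haddr_t addr, size_t len,
+ *                     size_t compressed_len, haddr_t *new_addr_ptr,
+ *                     size_t *new_len_ptr, size_t *new_compressed_len_ptr,
+ *                     unsigned *flags_ptr)
+ *                 {
+ *                     H5X_foo_t *foo = (H5X_foo_t *)thing;
+ *
+ *                     HDassert(foo);
+ *                     HDassert(flags_ptr);
+ *
+ *                     *flags_ptr = 0;
+ *
+ *                     return SUCCEED;
+ *                 }
+ *
+ *             A real client would also move any referenced entries out of
+ *             temporary file space here, and would set the appropriate
+ *             flags and output parameters if the entry is moved, resized,
+ *             or compressed.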
+ *
+ * SERIALIZE: Pointer to the serialize callback.
+ *
+ * The serialize callback is invoked by the metadata cache whenever
+ *             it needs a current on disk image of the metadata entry, either for
+ *             constructing a journal entry or for flushing the entry to disk.
+ *
+ * At this point, the base address and length of the entry's image on
+ * disk must be well known and not change during the serialization
+ * process.
+ *
+ * While any size and/or location changes must have been handled
+ * by a pre-serialize call, the client may elect to handle any other
+ * changes to the entry required to place it in correct form for
+ * writing to disk in this call.
+ *
+ * The typedef for the serialize callback is as follows:
+ *
+ * typedef herr_t (*H5C_serialize_func_t)(const H5F_t *f,
+ * void * image_ptr,
+ * size_t len,
+ * void * thing);
+ *
+ * The parameters of the serialize callback are as follows:
+ *
+ * f: File pointer -- needed if other metadata cache entries
+ * must be modified in the process of serializing the
+ * target entry.
+ *
+ * image_ptr: Pointer to a buffer of length len bytes into which a
+ * serialized image of the target metadata cache entry is
+ * to be written.
+ *
+ * Note that this buffer will not in general be initialized
+ * to any particular value. Thus the serialize function may
+ * not assume any initial value and must set each byte in
+ * the buffer.
+ *
+ * len: Length in bytes of the in file image of the entry to be
+ *             serialized.  Also the size of *image_ptr (above).  If
+ * compression is not enabled, this value is simply the
+ * uncompressed size of the entry's image on disk. If
+ * compression is enabled, this value is the size of the
+ * compressed image.
+ *
+ * This parameter is supplied mainly for sanity checking.
+ * Sanity checks should be performed when compiled in debug
+ * mode, but the parameter may be unused when compiled in
+ * production mode.
+ *
+ * thing: Pointer to void containing the address of the in core
+ * representation of the target metadata cache entry.
+ * This is the same pointer returned by a protect of the
+ * addr and len given above.
+ *
+ * Processing in the serialize function should proceed as follows:
+ *
+ * If there are any remaining changes to the entry required before
+ * write to disk, they must be dealt with first.
+ *
+ * The serialize function must then examine the in core
+ * representation indicated by the thing parameter, and write a
+ * serialized (and possibly compressed) image of its contents into
+ * the provided buffer.
+ *
+ * If it is successful, the function must return SUCCEED.
+ *
+ * If it fails for any reason, the function must return FAIL and
+ * push error information on the error stack with the error API
+ * routines.
+ *
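+ *             As a purely illustrative sketch, a client that builds its on
+ *             disk image during the pre-serialize call (as discussed above
+ *             for compressed entries) might simply copy that image here.
+ *             The H5X_foo_t type and its size and encoded_image fields are
+ *             hypothetical:
+ *
+ *                 static herr_t
+ *                 H5X__cache_foo_serialize(const H5F_t *f, void *image_ptr,
+ *                     size_t len, void *thing)
+ *                 {
+ *                     H5X_foo_t *foo = (H5X_foo_t *)thing;
+ *
+ *                     HDassert(foo);
+ *                     HDassert(image_ptr);
+ *                     HDassert(len == foo->size);
+ *
+ *                     HDmemcpy(image_ptr, foo->encoded_image, len);
+ *
+ *                     return SUCCEED;
+ *                 }
+ *
+ *             Most clients instead encode the fields of the in core
+ *             representation directly into the supplied buffer at this
+ *             point.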
+ *
+ * NOTIFY: Pointer to the notify callback.
+ *
+ * The notify callback is invoked by the metadata cache when a cache
+ * action on an entry has taken/will take place and the client indicates
+ * it wishes to be notified about the action.
+ *
+ * The typedef for the notify callback is as follows:
+ *
+ * typedef herr_t (*H5C_notify_func_t)(H5C_notify_action_t action,
+ * void *thing);
+ *
+ * The parameters of the notify callback are as follows:
+ *
+ * action: An enum indicating the metadata cache action that has taken/
+ * will take place.
+ *
+ * thing: Pointer to void containing the address of the in core
+ * representation of the target metadata cache entry. This
+ * is the same pointer that would be returned by a protect
+ * of the addr and len of the entry.
+ *
+ * Processing in the notify function should proceed as follows:
+ *
+ * The notify function may perform any action it would like, including
+ * metadata cache calls.
+ *
+ * If the function is successful, it must return SUCCEED.
+ *
+ * If it fails for any reason, the function must return FAIL and
+ * push error information on the error stack with the error API
+ * routines.
+ *
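+ *             As an illustration only, a client that maintains a flush
+ *             dependency on a parent entry might use the notify callback
+ *             as sketched below.  The H5X_foo_t type and its parent field
+ *             are hypothetical, and error stack pushes are omitted for
+ *             brevity:
+ *
+ *                 static herr_t
+ *                 H5X__cache_foo_notify(H5C_notify_action_t action,
+ *                     void *thing)
+ *                 {
+ *                     H5X_foo_t *foo = (H5X_foo_t *)thing;
+ *
+ *                     HDassert(foo);
+ *
+ *                     switch(action) {
+ *                         case H5C_NOTIFY_ACTION_AFTER_INSERT:
+ *                         case H5C_NOTIFY_ACTION_AFTER_LOAD:
+ *                             if(H5C_create_flush_dependency(foo->parent, foo) < 0)
+ *                                 return FAIL;
+ *                             break;
+ *
+ *                         case H5C_NOTIFY_ACTION_AFTER_FLUSH:
+ *                             break;
+ *
+ *                         case H5C_NOTIFY_ACTION_BEFORE_EVICT:
+ *                             if(H5C_destroy_flush_dependency(foo->parent, foo) < 0)
+ *                                 return FAIL;
+ *                             break;
+ *
+ *                         default:
+ *                             return FAIL;
+ *                     }
+ *
+ *                     return SUCCEED;
+ *                 }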
+ *
+ * FREE_ICR: Pointer to the free ICR callback.
+ *
+ * The free ICR callback is invoked by the metadata cache when it
+ * wishes to evict an entry, and needs the client to free the memory
+ * allocated for the in core representation.
+ *
+ * The typedef for the free ICR callback is as follows:
+ *
+ * typedef herr_t (*H5C_free_icr_func_t)(void * thing));
+ *
+ * The parameters of the free ICR callback are as follows:
+ *
+ * thing: Pointer to void containing the address of the in core
+ * representation of the target metadata cache entry. This
+ * is the same pointer that would be returned by a protect
+ * of the addr and len of the entry.
+ *
+ * Processing in the free ICR function should proceed as follows:
+ *
+ * The free ICR function must free all memory allocated to the
+ * in core representation.
+ *
+ * If the function is successful, it must return SUCCEED.
+ *
+ * If it fails for any reason, the function must return FAIL and
+ * push error information on the error stack with the error API
+ * routines.
+ *
+ * At least when compiled with debug, it would be useful if the
+ * free ICR call would fail if the in core representation has been
+ *             modified since the last serialize or clear callback.
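+ *
+ *             As an illustration only, a minimal free ICR callback might
+ *             look like the following, where H5X_foo_t and the
+ *             H5X__foo_dest() helper (which releases all memory held by
+ *             the in core representation) are hypothetical:
+ *
+ *                 static herr_t
+ *                 H5X__cache_foo_free_icr(void *thing)
+ *                 {
+ *                     H5X_foo_t *foo = (H5X_foo_t *)thing;
+ *
+ *                     HDassert(foo);
+ *
+ *                     if(H5X__foo_dest(foo) < 0)
+ *                         return FAIL;
+ *
+ *                     return SUCCEED;
+ *                 }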
+ *
+ * CLEAR: Pointer to the clear callback.
+ *
+ * In principle, there should be no need for the clear callback,
+ * as the dirty flag should be maintained by the metadata cache.
+ *
+ * However, some clients maintain dirty bits on internal data,
+ * and we need some way of keeping these dirty bits in sync with
+ * those maintained by the metadata cache. This callback exists
+ * to serve this purpose. If defined, it is called whenever the
+ *             cache marks a dirty entry clean, or when the cache is about to
+ *             discard a dirty entry without writing it to disk (this
+ *             happens as the result of an unprotect call with the
+ *             H5AC__DELETED_FLAG set and the H5C__TAKE_OWNERSHIP_FLAG not
+ *             set).
+ *
+ * Arguably, this functionality should be in the NOTIFY callback.
+ * However, this callback is specific to only a few clients, and
+ * it will be called relatively frequently. Hence it is made its
+ * own callback to minimize overhead.
+ *
+ * The typedef for the clear callback is as follows:
+ *
+ * typedef herr_t (*H5C_clear_func_t)(const H5F_t *f,
+ * void * thing,
+ * hbool_t about_to_destroy);
+ *
+ * The parameters of the clear callback are as follows:
+ *
+ * f: File pointer.
+ *
+ * thing: Pointer to void containing the address of the in core
+ * representation of the target metadata cache entry. This
+ * is the same pointer that would be returned by a protect()
+ * call of the associated addr and len.
+ *
+ * about_to_destroy: Boolean flag used to indicate whether the
+ * metadata cache is about to destroy the target metadata
+ * cache entry. The callback may use this flag to omit
+ *             operations that are irrelevant if the entry is about
+ * to be destroyed.
+ *
+ * Processing in the clear function should proceed as follows:
+ *
+ * Reset all internal dirty bits in the target metadata cache entry.
+ *
+ * If the about_to_destroy flag is TRUE, the clear function may
+ *             omit clearing any dirty bit that will not trigger a sanity check
+ *             failure or otherwise cause problems in the subsequent free icr call.
+ *             In particular, the call must ensure that the free icr call will
+ *             not fail due to changes made prior to this call and after the
+ *             last serialize or clear call.
+ *
+ * If the function is successful, it must return SUCCEED.
+ *
+ * If it fails for any reason, the function must return FAIL and
+ * push error information on the error stack with the error API
+ * routines.
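+ *
+ *             As a purely illustrative sketch, a client that keeps a single
+ *             internal dirty flag might implement the clear callback as
+ *             follows.  The H5X_foo_t type and its internal_dirty field are
+ *             hypothetical:
+ *
+ *                 static herr_t
+ *                 H5X__cache_foo_clear(const H5F_t *f, void *thing,
+ *                     hbool_t about_to_destroy)
+ *                 {
+ *                     H5X_foo_t *foo = (H5X_foo_t *)thing;
+ *
+ *                     HDassert(foo);
+ *
+ *                     foo->internal_dirty = FALSE;
+ *
+ *                     return SUCCEED;
+ *                 }
+ *
+ *             A client with more elaborate internal dirty bits would reset
+ *             each of them here, possibly skipping those made irrelevant
+ *             when about_to_destroy is TRUE.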
+ *
+ * GET_FSF_SIZE: Pointer to the get file space free size callback.
+ *
+ * In principle, there is no need for the get file space free size
+ * callback. However, as an optimization, it is sometimes convenient
+ * to allocate and free file space for a number of cache entries
+ * simultaneously in a single contiguous block of file space.
+ *
+ * File space allocation is done by the client, so the metadata cache
+ * need not be involved. However, since the metadata cache typically
+ * handles file space release when an entry is destroyed, some
+ * adjustment on the part of the metadata cache is required for this
+ * operation.
+ *
+ * The get file space free size callback exists to support this
+ * operation.
+ *
+ *             If a group of cache entries that were allocated together is to
+ *             be discarded and its file space released, the type of the first
+ * (i.e. lowest address) entry in the group must implement the
+ * get free file space size callback.
+ *
+ * To free the file space of all entries in the group in a single
+ * operation, first expunge all entries other than the first without
+ *             setting the free file space flag.
+ *
+ * Then, to complete the operation, unprotect or expunge the first
+ * entry in the block with the free file space flag set. Since
+ * the get free file space callback is implemented, the metadata
+ * cache will use this callback to get the size of the block to be
+ * freed, instead of using the size of the entry as is done otherwise.
+ *
+ * At present this callback is used only by the H5FA and H5EA dblock
+ * and dblock page client classes.
+ *
+ *             The typedef for the get_fsf_size callback is as follows:
+ *
+ * typedef herr_t (*H5C_get_fsf_size_t)(const void * thing,
+ * size_t *fsf_size_ptr);
+ *
+ *             The parameters of the get_fsf_size callback are as follows:
+ *
+ * thing: Pointer to void containing the address of the in core
+ * representation of the target metadata cache entry. This
+ * is the same pointer that would be returned by a protect()
+ * call of the associated addr and len.
+ *
+ * fsf_size_ptr: Pointer to size_t in which the callback will return
+ * the size of the piece of file space to be freed. Note
+ * that the space to be freed is presumed to have the same
+ * base address as the cache entry.
+ *
+ * The function simply returns the size of the block of file space
+ * to be freed in *fsf_size_ptr.
+ *
+ * If the function is successful, it must return SUCCEED.
+ *
+ * If it fails for any reason, the function must return FAIL and
+ * push error information on the error stack with the error API
+ * routines.
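+ *
+ *             As an illustration only, a client that records the size of
+ *             the contiguous block allocated for a group of entries in a
+ *             field of the first entry might implement this callback as
+ *             follows.  The H5X_foo_t type and its alloc_block_size field
+ *             are hypothetical:
+ *
+ *                 static herr_t
+ *                 H5X__cache_foo_fsf_size(const void *thing,
+ *                     size_t *fsf_size_ptr)
+ *                 {
+ *                     const H5X_foo_t *foo = (const H5X_foo_t *)thing;
+ *
+ *                     HDassert(foo);
+ *                     HDassert(fsf_size_ptr);
+ *
+ *                     *fsf_size_ptr = foo->alloc_block_size;
+ *
+ *                     return SUCCEED;
+ *                 }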
+ *
+ ***************************************************************************/
+
+/* Actions that can be reported to 'notify' client callback */
+typedef enum H5C_notify_action_t {
+ H5C_NOTIFY_ACTION_AFTER_INSERT, /* Entry has been added to the cache
+ * via the insert call
+ */
+ H5C_NOTIFY_ACTION_AFTER_LOAD, /* Entry has been loaded into the
+                                         * cache from file via the protect call
+ */
+ H5C_NOTIFY_ACTION_AFTER_FLUSH, /* Entry has just been flushed to
+ * file.
+ */
+ H5C_NOTIFY_ACTION_BEFORE_EVICT /* Entry is about to be evicted
+ * from cache.
+ */
+} H5C_notify_action_t;
+
+/* Cache client callback function pointers */
+typedef herr_t (*H5C_get_load_size_func_t)(const void *udata_ptr,
+ size_t *image_len_ptr);
+typedef void *(*H5C_deserialize_func_t)(const void *image_ptr,
+ size_t len, void *udata_ptr, hbool_t *dirty_ptr);
+typedef herr_t (*H5C_image_len_func_t)(const void *thing,
+ size_t *image_len_ptr, hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+typedef herr_t (*H5C_pre_serialize_func_t)(const H5F_t *f, hid_t dxpl_id,
+ void *thing, haddr_t addr, size_t len, size_t compressed_len,
+ haddr_t *new_addr_ptr, size_t *new_len_ptr, size_t *new_compressed_len_ptr,
+ unsigned *flags_ptr);
+typedef herr_t (*H5C_serialize_func_t)(const H5F_t *f, void *image_ptr,
+ size_t len, void *thing);
+typedef herr_t (*H5C_notify_func_t)(H5C_notify_action_t action, void *thing);
+typedef herr_t (*H5C_free_icr_func_t)(void *thing);
+typedef herr_t (*H5C_clear_func_t)(const H5F_t *f, void * thing,
+ hbool_t about_to_destroy);
+typedef herr_t (*H5C_get_fsf_size_t)(const void * thing, size_t *fsf_size_ptr);
+
+/* Metadata cache client class definition */
+typedef struct H5C_class_t {
+ int id;
+ const char * name;
+ H5FD_mem_t mem_type;
+ unsigned flags;
+ H5C_get_load_size_func_t get_load_size;
+ H5C_deserialize_func_t deserialize;
+ H5C_image_len_func_t image_len;
+ H5C_pre_serialize_func_t pre_serialize;
+ H5C_serialize_func_t serialize;
+ H5C_notify_func_t notify;
+ H5C_free_icr_func_t free_icr;
+ H5C_clear_func_t clear;
+ H5C_get_fsf_size_t fsf_size;
+} H5C_class_t;
+
+/* Type definitions of callback functions used by the cache as a whole */
+typedef herr_t (*H5C_write_permitted_func_t)(const H5F_t *f,
+ hbool_t *write_permitted_ptr);
+typedef herr_t (*H5C_log_flush_func_t)(H5C_t *cache_ptr, haddr_t addr,
+ hbool_t was_dirty, unsigned flags);
/****************************************************************************
*
@@ -234,7 +1130,7 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
* just before the entry is freed.
*
* This is necessary, as the LRU list can be changed out
- * from under H5C_make_space_in_cache() by the flush
+ * from under H5C_make_space_in_cache() by the serialize
* callback which may change the size of an existing entry,
* and/or load a new entry while serializing the target entry.
*
@@ -247,18 +1143,66 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
* detect this case, and re-start its scan from the bottom
* of the LRU when this situation occurs.
*
- * This field is only compiled in debug mode.
+ * cache_ptr: Pointer to the cache that this entry is contained within.
*
* addr: Base address of the cache entry on disk.
*
- * size: Length of the cache entry on disk. Note that unlike normal
- * caches, the entries in this cache are of variable length.
- * The entries should never overlap, and when we do writebacks,
- * we will want to writeback adjacent entries where possible.
+ * size:        Length of the cache entry on disk in bytes (exception: if
+ * the entry is compressed on disk, this field contains the
+ * uncompressed size of the entry -- see discussion of
+ * compressed entries below). Note that unlike normal
+ * caches, the entries in this cache are of arbitrary size.
+ *
+ * With the exception of compressed entries, the file space
+ * allocations for cache entries implied by the addr and size
+ * fields must be disjoint. For compressed entries,
+ *             the size field contains the uncompressed size -- thus in
+ *             this case, substitution of the compressed size for size
+ *             must result in disjoint file space allocations.  However,
+ *             as discussed below, the compressed size may not be known.
+ *
+ * Any entry whose associated instance of H5C_class_t has the
+ * H5C__CLASS_COMPRESSED_FLAG set may be compressed. When
+ * an entry is compressed (that is, when filters are enabled
+ * on it), the compressed flag (see below) must be set, and
+ * the compressed size (if known), must be stored in
+ * the compressed_size field.
+ *
+ * Since the compressed size will be unknown unless the
+ * entry is clean, or has an up to date image (see the
+ * image_ptr and image_up_to_date fields below), we use the
+ * uncompressed size for all purposes other than disk I/O.
+ *
+ * compressed: Boolean flag that is set iff the instance of H5C_class_t
+ * associated with the entry has the H5C__CLASS_COMPRESSED_FLAG
+ * set, and filters are enabled on the entry.
+ *
+ * compressed_size: If compressed is TRUE, this field contains the actual
+ * compressed size of the entry in bytes, which is also its
+ * true size on disk -- or the uncompressed size if the
+ * compressed size is unknown (i.e. the entry has been
+ * inserted in the cache, but it has not been compressed yet).
+ * Note that this value will usually be incorrect if the
+ * entry is dirty.
+ *
+ * Since this value is frequently out of date and expensive to
+ * compute, it is used only for disk I/O. The uncompressed
+ * size of the entry (stored in the size field above) is used
+ * for all other purposes (i.e. computing the sum of the sizes
+ * of all entries in the cache, etc.).
+ *
+ * If compressed is FALSE, this field should contain 0.
+ *
+ * image_ptr: Pointer to void. When not NULL, this field points to a
+ * dynamically allocated block of size bytes in which the
+ * on disk image of the metadata cache entry is stored.
*
- * NB: At present, entries need not be contiguous on disk. Until
- * we fix this, we can't do much with writing back adjacent
- * entries.
+ * If the entry is dirty, the pre-serialize and serialize
+ * callbacks must be used to update this image before it is
+ *             written to disk.
+ *
+ * image_up_to_date: Boolean flag that is set to TRUE when *image_ptr
+ *             is up to date, and set to FALSE when the entry is dirtied.
*
* type: Pointer to the instance of H5C_class_t containing pointers
* to the methods for cache entries of the current type. This
@@ -288,6 +1232,9 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
* modules using the cache. These still clear the
* is_dirty field as before. -- JRM 7/5/05
*
+ * Update: Management of the is_dirty field is now entirely
+ * in the cache. -- JRM 7/5/07
+ *
* dirtied: Boolean flag used to indicate that the entry has been
* dirtied while protected.
*
@@ -344,9 +1291,10 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
* policy code (LRU at present).
*
* 2) A pinned entry can be accessed or modified at any time.
- * Therefore, the cache must check with the entry owner
- * before flushing it. If permission is denied, the
- * cache does not flush the entry.
+ * This places an extra burden on the pre-serialize and
+ * serialize callbacks, which must ensure that a pinned
+ *             entry is consistent and ready to write to disk before
+ * generating an image.
*
* 3) A pinned entry can be marked as dirty (and possibly
* change size) while it is unprotected.
@@ -388,6 +1336,11 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
* will need to be expanded and tested appropriately if that
* functionality is desired.
*
+ * Update: There are now two possible last entries
+ * (superblock and file driver info message). This
+ * number will probably increase as we add superblock
+ * messages. JRM -- 11/18/14
+ *
* clear_on_unprotect: Boolean flag used only in PHDF5. When H5C is used
* to implement the metadata cache In the parallel case, only
* the cache with mpi rank 0 is allowed to actually write to
@@ -415,11 +1368,6 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
* destroy_in_progress: Boolean flag that is set to true iff the entry
* is in the process of being flushed and destroyed.
*
- * free_file_space_on_destroy: Boolean flag that is set to true iff the entry
- * is in the process of being flushed and destroyed and the file
- * space used by the object should be freed by the cache client's
- * 'dest' callback routine.
- *
*
* Fields supporting the 'flush dependency' feature:
*
@@ -503,63 +1451,43 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
* The use of the replacement policy fields under the Modified LRU policy
* is discussed below:
*
- * next: Next pointer in either the LRU or the protected list,
- * depending on the current value of protected. If there
- * is no next entry on the list, this field should be set
- * to NULL.
+ * next: Next pointer in either the LRU, the protected list, or
+ * the pinned list depending on the current values of
+ * is_protected and is_pinned. If there is no next entry
+ * on the list, this field should be set to NULL.
*
- * prev: Prev pointer in either the LRU or the protected list,
- * depending on the current value of protected. If there
- * is no previous entry on the list, this field should be
- * set to NULL.
+ * prev: Prev pointer in either the LRU, the protected list,
+ * or the pinned list depending on the current values of
+ * is_protected and is_pinned. If there is no previous
+ * entry on the list, this field should be set to NULL.
*
* aux_next: Next pointer on either the clean or dirty LRU lists.
- * This entry should be NULL when protected is true. When
- * protected is false, and dirty is true, it should point
- * to the next item on the dirty LRU list. When protected
- * is false, and dirty is false, it should point to the
- * next item on the clean LRU list. In either case, when
- * there is no next item, it should be NULL.
+ * This entry should be NULL when either is_protected or
+ * is_pinned is true.
+ *
+ * When is_protected and is_pinned are false, and is_dirty is
+ * true, it should point to the next item on the dirty LRU
+ * list.
+ *
+ * When is_protected and is_pinned are false, and is_dirty is
+ * false, it should point to the next item on the clean LRU
+ * list. In either case, when there is no next item, it
+ * should be NULL.
*
* aux_prev: Previous pointer on either the clean or dirty LRU lists.
- * This entry should be NULL when protected is true. When
- * protected is false, and dirty is true, it should point
- * to the previous item on the dirty LRU list. When protected
- * is false, and dirty is false, it should point to the
- * previous item on the clean LRU list. In either case, when
- * there is no previous item, it should be NULL.
- *
- *
- * Fields supporting metadata journaling:
- *
- * last_trans: unit64_t containing the ID of the last transaction in
- * which this entry was dirtied. If journaling is disabled,
- * or if the entry has never been dirtied in a transaction,
- * this field should be set to zero. Once we notice that
- * the specified transaction has made it to disk, we will
- * reset this field to zero as well.
- *
- * We must maintain this field, as to avoid messages from
- * the future, we must not flush a dirty entry to disk
- * until the last transaction in which it was dirtied
- * has made it to disk in the journal file.
- *
- * trans_next: Next pointer in the entries modified in the current
- * transaction list. This field should always be null
- * unless journaling is enabled, the entry is dirty,
- * and last_trans field contains the current transaction
- * number. Even if all these conditions are fulfilled,
- * the field will still be NULL if this is the last
- * entry on the list.
- *
- * trans_prev: Previous pointer in the entries modified in the current
- * transaction list. This field should always be null
- * unless journaling is enabled, the entry is dirty,
- * and last_trans field contains the current transaction
- * number. Even if all these conditions are fulfilled,
- * the field will still be NULL if this is the first
- * entry on the list.
+ * This entry should be NULL when either is_protected or
+ * is_pinned is true.
+ *
+ * When is_protected and is_pinned are false, and is_dirty is
+ * true, it should point to the previous item on the dirty
+ * LRU list.
+ *
+ * When is_protected and is_pinned are false, and is_dirty
+ * is false, it should point to the previous item on the
+ * clean LRU list.
*
+ * In either case, when there is no previous item, it should
+ * be NULL.
*
* Cache entry stats collection fields:
*
@@ -580,21 +1508,16 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
* been pinned in cache in its life time.
*
****************************************************************************/
-
-#ifndef NDEBUG
-#define H5C__H5C_CACHE_ENTRY_T_MAGIC 0x005CAC0A
-#define H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC 0xDeadBeef
-#endif /* NDEBUG */
-
-typedef struct H5C_cache_entry_t
-{
-#ifndef NDEBUG
+typedef struct H5C_cache_entry_t {
uint32_t magic;
-#endif /* NDEBUG */
- H5C_t * cache_ptr;
+ H5C_t * cache_ptr;
haddr_t addr;
size_t size;
- const H5C_class_t * type;
+ hbool_t compressed;
+ size_t compressed_size;
+ void * image_ptr;
+ hbool_t image_up_to_date;
+ const H5C_class_t * type;
haddr_t tag;
hbool_t is_dirty;
hbool_t dirtied;
@@ -612,42 +1535,33 @@ typedef struct H5C_cache_entry_t
#endif /* H5_HAVE_PARALLEL */
hbool_t flush_in_progress;
hbool_t destroy_in_progress;
- hbool_t free_file_space_on_destroy;
/* fields supporting the 'flush dependency' feature: */
-
- struct H5C_cache_entry_t * flush_dep_parent;
+ struct H5C_cache_entry_t * flush_dep_parent;
uint64_t child_flush_dep_height_rc[H5C__NUM_FLUSH_DEP_HEIGHTS];
unsigned flush_dep_height;
hbool_t pinned_from_client;
hbool_t pinned_from_cache;
/* fields supporting the hash table: */
-
- struct H5C_cache_entry_t * ht_next;
- struct H5C_cache_entry_t * ht_prev;
+ struct H5C_cache_entry_t * ht_next;
+ struct H5C_cache_entry_t * ht_prev;
/* fields supporting replacement policies: */
-
- struct H5C_cache_entry_t * next;
- struct H5C_cache_entry_t * prev;
- struct H5C_cache_entry_t * aux_next;
- struct H5C_cache_entry_t * aux_prev;
+ struct H5C_cache_entry_t * next;
+ struct H5C_cache_entry_t * prev;
+ struct H5C_cache_entry_t * aux_next;
+ struct H5C_cache_entry_t * aux_prev;
#if H5C_COLLECT_CACHE_ENTRY_STATS
-
/* cache entry stats fields */
-
int32_t accesses;
int32_t clears;
int32_t flushes;
int32_t pins;
-
#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
-
} H5C_cache_entry_t;
-
/****************************************************************************
*
* structure H5C_auto_size_ctl_t
@@ -888,41 +1802,6 @@ typedef struct H5C_cache_entry_t
*
****************************************************************************/
-#define H5C_RESIZE_CFG__VALIDATE_GENERAL 0x1
-#define H5C_RESIZE_CFG__VALIDATE_INCREMENT 0x2
-#define H5C_RESIZE_CFG__VALIDATE_DECREMENT 0x4
-#define H5C_RESIZE_CFG__VALIDATE_INTERACTIONS 0x8
-#define H5C_RESIZE_CFG__VALIDATE_ALL \
-( \
- H5C_RESIZE_CFG__VALIDATE_GENERAL | \
- H5C_RESIZE_CFG__VALIDATE_INCREMENT | \
- H5C_RESIZE_CFG__VALIDATE_DECREMENT | \
- H5C_RESIZE_CFG__VALIDATE_INTERACTIONS \
-)
-
-#define H5C__CURR_AUTO_SIZE_CTL_VER 1
-#define H5C__CURR_AUTO_RESIZE_RPT_FCN_VER 1
-
-#define H5C__MAX_EPOCH_MARKERS 10
-
-#define H5C__DEF_AR_UPPER_THRESHHOLD 0.9999f
-#define H5C__DEF_AR_LOWER_THRESHHOLD 0.9f
-#define H5C__DEF_AR_MAX_SIZE ((size_t)(16 * 1024 * 1024))
-#define H5C__DEF_AR_INIT_SIZE ((size_t)( 1 * 1024 * 1024))
-#define H5C__DEF_AR_MIN_SIZE ((size_t)( 1 * 1024 * 1024))
-#define H5C__DEF_AR_MIN_CLEAN_FRAC 0.5f
-#define H5C__DEF_AR_INCREMENT 2.0f
-#define H5C__DEF_AR_MAX_INCREMENT ((size_t)( 2 * 1024 * 1024))
-#define H5C__DEF_AR_FLASH_MULTIPLE 1.0f
-#define H5C__DEV_AR_FLASH_THRESHOLD 0.25f
-#define H5C__DEF_AR_DECREMENT 0.9f
-#define H5C__DEF_AR_MAX_DECREMENT ((size_t)( 1 * 1024 * 1024))
-#define H5C__DEF_AR_EPCHS_B4_EVICT 3
-#define H5C__DEF_AR_EMPTY_RESERVE 0.05f
-#define H5C__MIN_AR_EPOCH_LENGTH 100
-#define H5C__DEF_AR_EPOCH_LENGTH 50000
-#define H5C__MAX_AR_EPOCH_LENGTH 1000000
-
enum H5C_resize_status
{
in_spec,
@@ -936,303 +1815,117 @@ enum H5C_resize_status
not_full
}; /* enum H5C_resize_conditions */
-typedef void (*H5C_auto_resize_rpt_fcn)(H5C_t * cache_ptr,
- int32_t version,
- double hit_rate,
- enum H5C_resize_status status,
- size_t old_max_cache_size,
- size_t new_max_cache_size,
- size_t old_min_clean_size,
- size_t new_min_clean_size);
+typedef void (*H5C_auto_resize_rpt_fcn)(H5C_t * cache_ptr, int32_t version,
+ double hit_rate, enum H5C_resize_status status, size_t old_max_cache_size,
+ size_t new_max_cache_size, size_t old_min_clean_size, size_t new_min_clean_size);
-typedef struct H5C_auto_size_ctl_t
-{
+typedef struct H5C_auto_size_ctl_t {
/* general configuration fields: */
int32_t version;
H5C_auto_resize_rpt_fcn rpt_fcn;
-
hbool_t set_initial_size;
size_t initial_size;
-
double min_clean_fraction;
-
size_t max_size;
size_t min_size;
-
int64_t epoch_length;
-
/* size increase control fields: */
enum H5C_cache_incr_mode incr_mode;
-
double lower_hr_threshold;
-
double increment;
-
hbool_t apply_max_increment;
size_t max_increment;
-
enum H5C_cache_flash_incr_mode flash_incr_mode;
double flash_multiple;
double flash_threshold;
-
/* size decrease control fields: */
enum H5C_cache_decr_mode decr_mode;
-
double upper_hr_threshold;
-
double decrement;
-
hbool_t apply_max_decrement;
size_t max_decrement;
-
int32_t epochs_before_eviction;
-
hbool_t apply_empty_reserve;
double empty_reserve;
-
} H5C_auto_size_ctl_t;
-
-/*
- * Library prototypes.
- */
-
-/* #defines of flags used in the flags parameters in some of the
- * following function calls. Note that not all flags are applicable
- * to all function calls. Flags that don't apply to a particular
- * function are ignored in that function.
- *
- * These flags apply to all function calls:
- *
- * H5C__NO_FLAGS_SET (generic "no flags set" for all fcn calls)
- *
- *
- * These flags apply to H5C_insert_entry():
- *
- * H5C__SET_FLUSH_MARKER_FLAG
- * H5C__PIN_ENTRY_FLAG
- *
- * These flags apply to H5C_protect()
- *
- * H5C__READ_ONLY_FLAG
- *
- * These flags apply to H5C_unprotect():
- *
- * H5C__SET_FLUSH_MARKER_FLAG
- * H5C__DELETED_FLAG
- * H5C__DIRTIED_FLAG
- * H5C__PIN_ENTRY_FLAG
- * H5C__UNPIN_ENTRY_FLAG
- * H5C__FREE_FILE_SPACE_FLAG
- * H5C__TAKE_OWNERSHIP_FLAG
- *
- * These flags apply to H5C_expunge_entry():
- *
- * H5C__FREE_FILE_SPACE_FLAG
- *
- * These flags apply to H5C_flush_cache():
- *
- * H5C__FLUSH_INVALIDATE_FLAG
- * H5C__FLUSH_CLEAR_ONLY_FLAG
- * H5C__FLUSH_MARKED_ENTRIES_FLAG
- * H5C__FLUSH_IGNORE_PROTECTED_FLAG (can't use this flag in combination
- * with H5C__FLUSH_INVALIDATE_FLAG)
- *
- * These flags apply to H5C_flush_single_entry():
- *
- * H5C__FLUSH_INVALIDATE_FLAG
- * H5C__FLUSH_CLEAR_ONLY_FLAG
- * H5C__FLUSH_MARKED_ENTRIES_FLAG
- * H5C__TAKE_OWNERSHIP_FLAG
- */
-
-#define H5C__NO_FLAGS_SET 0x0000
-#define H5C__SET_FLUSH_MARKER_FLAG 0x0001
-#define H5C__DELETED_FLAG 0x0002
-#define H5C__DIRTIED_FLAG 0x0004
-#define H5C__PIN_ENTRY_FLAG 0x0008
-#define H5C__UNPIN_ENTRY_FLAG 0x0010
-#define H5C__FLUSH_INVALIDATE_FLAG 0x0020
-#define H5C__FLUSH_CLEAR_ONLY_FLAG 0x0040
-#define H5C__FLUSH_MARKED_ENTRIES_FLAG 0x0080
-#define H5C__FLUSH_IGNORE_PROTECTED_FLAG 0x0100
-#define H5C__READ_ONLY_FLAG 0x0200
-#define H5C__FREE_FILE_SPACE_FLAG 0x0800
-#define H5C__TAKE_OWNERSHIP_FLAG 0x1000
-#define H5C__FLUSH_LAST_FLAG 0x2000
-#define H5C__FLUSH_COLLECTIVELY_FLAG 0x4000
-
-#ifdef H5_HAVE_PARALLEL
-H5_DLL herr_t H5C_apply_candidate_list(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- H5C_t * cache_ptr,
- int num_candidates,
- haddr_t * candidates_list_ptr,
- int mpi_rank,
- int mpi_size);
-
-H5_DLL herr_t H5C_construct_candidate_list__clean_cache(H5C_t * cache_ptr);
-
-H5_DLL herr_t H5C_construct_candidate_list__min_clean(H5C_t * cache_ptr);
-#endif /* H5_HAVE_PARALLEL */
-
-H5_DLL H5C_t * H5C_create(size_t max_cache_size,
- size_t min_clean_size,
- int max_type_id,
- const char * (* type_name_table_ptr),
- H5C_write_permitted_func_t check_write_permitted,
- hbool_t write_permitted,
- H5C_log_flush_func_t log_flush,
- void * aux_ptr);
-
-H5_DLL void H5C_def_auto_resize_rpt_fcn(H5C_t * cache_ptr,
- int32_t version,
- double hit_rate,
- enum H5C_resize_status status,
- size_t old_max_cache_size,
- size_t new_max_cache_size,
- size_t old_min_clean_size,
- size_t new_min_clean_size);
-
-H5_DLL herr_t H5C_dest(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id);
-
-H5_DLL herr_t H5C_expunge_entry(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- const H5C_class_t * type,
- haddr_t addr,
- unsigned flags);
-
-H5_DLL herr_t H5C_flush_cache(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- unsigned flags);
-
-H5_DLL herr_t H5C_flush_to_min_clean(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id);
-
-H5_DLL herr_t H5C_get_cache_auto_resize_config(const H5C_t * cache_ptr,
- H5C_auto_size_ctl_t *config_ptr);
-
-H5_DLL herr_t H5C_get_cache_size(H5C_t * cache_ptr,
- size_t * max_size_ptr,
- size_t * min_clean_size_ptr,
- size_t * cur_size_ptr,
- int32_t * cur_num_entries_ptr);
-
-H5_DLL herr_t H5C_get_cache_hit_rate(H5C_t * cache_ptr,
- double * hit_rate_ptr);
-
-H5_DLL herr_t H5C_get_entry_status(const H5F_t *f,
- haddr_t addr,
- size_t * size_ptr,
- hbool_t * in_cache_ptr,
- hbool_t * is_dirty_ptr,
- hbool_t * is_protected_ptr,
- hbool_t * is_pinned_ptr,
- hbool_t * is_flush_dep_parent_ptr,
- hbool_t * is_flush_dep_child_ptr);
-
-H5_DLL herr_t H5C_get_evictions_enabled(const H5C_t * cache_ptr,
- hbool_t * evictions_enabled_ptr);
-
-H5_DLL herr_t H5C_get_trace_file_ptr(const H5C_t *cache_ptr,
- FILE **trace_file_ptr_ptr);
-H5_DLL herr_t H5C_get_trace_file_ptr_from_entry(const H5C_cache_entry_t *entry_ptr,
- FILE **trace_file_ptr_ptr);
-
-H5_DLL herr_t H5C_insert_entry(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- const H5C_class_t * type,
- haddr_t addr,
- void * thing,
- unsigned int flags);
-
-H5_DLL herr_t H5C_mark_entries_as_clean(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- int32_t ce_array_len,
- haddr_t *ce_array_ptr);
-
+/***************************************/
+/* Library-private Function Prototypes */
+/***************************************/
+
+H5_DLL H5C_t *H5C_create(size_t max_cache_size, size_t min_clean_size,
+ int max_type_id, const char *(*type_name_table_ptr),
+ H5C_write_permitted_func_t check_write_permitted, hbool_t write_permitted,
+ H5C_log_flush_func_t log_flush, void *aux_ptr);
+H5_DLL void H5C_def_auto_resize_rpt_fcn(H5C_t *cache_ptr, int32_t version,
+ double hit_rate, enum H5C_resize_status status,
+ size_t old_max_cache_size, size_t new_max_cache_size,
+ size_t old_min_clean_size, size_t new_min_clean_size);
+H5_DLL herr_t H5C_dest(H5F_t *f, hid_t dxpl_id);
+H5_DLL herr_t H5C_expunge_entry(H5F_t *f, hid_t dxpl_id,
+ const H5C_class_t *type, haddr_t addr, unsigned flags);
+H5_DLL herr_t H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags);
+H5_DLL herr_t H5C_flush_to_min_clean(H5F_t *f, hid_t dxpl_id);
+H5_DLL herr_t H5C_get_cache_auto_resize_config(const H5C_t *cache_ptr,
+ H5C_auto_size_ctl_t *config_ptr);
+H5_DLL herr_t H5C_get_cache_size(H5C_t *cache_ptr, size_t *max_size_ptr,
+ size_t *min_clean_size_ptr, size_t *cur_size_ptr,
+ int32_t *cur_num_entries_ptr);
+H5_DLL herr_t H5C_get_cache_hit_rate(H5C_t *cache_ptr, double *hit_rate_ptr);
+H5_DLL herr_t H5C_get_entry_status(const H5F_t *f, haddr_t addr,
+ size_t *size_ptr, hbool_t *in_cache_ptr, hbool_t *is_dirty_ptr,
+ hbool_t *is_protected_ptr, hbool_t *is_pinned_ptr,
+ hbool_t *is_flush_dep_parent_ptr, hbool_t *is_flush_dep_child_ptr);
+H5_DLL herr_t H5C_get_evictions_enabled(const H5C_t *cache_ptr, hbool_t *evictions_enabled_ptr);
+H5_DLL FILE *H5C_get_trace_file_ptr(const H5C_t *cache_ptr);
+H5_DLL FILE *H5C_get_trace_file_ptr_from_entry(const H5C_cache_entry_t *entry_ptr);
+H5_DLL herr_t H5C_insert_entry(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type,
+ haddr_t addr, void *thing, unsigned int flags);
+H5_DLL herr_t H5C_mark_entries_as_clean(H5F_t *f, hid_t dxpl_id, int32_t ce_array_len,
+ haddr_t *ce_array_ptr);
H5_DLL herr_t H5C_mark_entry_dirty(void *thing);
-
-H5_DLL herr_t H5C_move_entry(H5C_t * cache_ptr,
- const H5C_class_t * type,
- haddr_t old_addr,
- haddr_t new_addr);
-
+H5_DLL herr_t H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type,
+ haddr_t old_addr, haddr_t new_addr);
H5_DLL herr_t H5C_pin_protected_entry(void *thing);
-
H5_DLL herr_t H5C_create_flush_dependency(void *parent_thing, void *child_thing);
-
-H5_DLL void * H5C_protect(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- const H5C_class_t * type,
- haddr_t addr,
- void * udata,
- unsigned flags);
-
-H5_DLL herr_t H5C_reset_cache_hit_rate_stats(H5C_t * cache_ptr);
-
+H5_DLL void * H5C_protect(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type,
+ haddr_t addr, void *udata, unsigned flags);
+H5_DLL herr_t H5C_reset_cache_hit_rate_stats(H5C_t *cache_ptr);
H5_DLL herr_t H5C_resize_entry(void *thing, size_t new_size);
-
H5_DLL herr_t H5C_set_cache_auto_resize_config(H5C_t *cache_ptr,
- H5C_auto_size_ctl_t *config_ptr);
-
-H5_DLL herr_t H5C_set_evictions_enabled(H5C_t *cache_ptr,
- hbool_t evictions_enabled);
-
-H5_DLL herr_t H5C_set_prefix(H5C_t * cache_ptr, char * prefix);
-
-H5_DLL herr_t H5C_set_trace_file_ptr(H5C_t * cache_ptr,
- FILE * trace_file_ptr);
-
-H5_DLL herr_t H5C_stats(H5C_t * cache_ptr,
- const char * cache_name,
- hbool_t display_detailed_stats);
-
-H5_DLL void H5C_stats__reset(H5C_t * cache_ptr);
-
-H5_DLL herr_t H5C_dump_cache(H5C_t * cache_ptr,
- const char * cache_name);
-
+ H5C_auto_size_ctl_t *config_ptr);
+H5_DLL herr_t H5C_set_evictions_enabled(H5C_t *cache_ptr, hbool_t evictions_enabled);
+H5_DLL herr_t H5C_set_prefix(H5C_t *cache_ptr, char *prefix);
+H5_DLL herr_t H5C_set_trace_file_ptr(H5C_t *cache_ptr, FILE *trace_file_ptr);
+H5_DLL herr_t H5C_stats(H5C_t *cache_ptr, const char *cache_name,
+ hbool_t display_detailed_stats);
+H5_DLL void H5C_stats__reset(H5C_t *cache_ptr);
+H5_DLL herr_t H5C_dump_cache(H5C_t *cache_ptr, const char *cache_name);
H5_DLL herr_t H5C_unpin_entry(void *thing);
-
H5_DLL herr_t H5C_destroy_flush_dependency(void *parent_thing, void *child_thing);
+H5_DLL herr_t H5C_unprotect(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type,
+ haddr_t addr, void *thing, unsigned int flags);
+H5_DLL herr_t H5C_validate_resize_config(H5C_auto_size_ctl_t *config_ptr,
+ unsigned int tests);
+H5_DLL herr_t H5C_ignore_tags(H5C_t *cache_ptr);
+H5_DLL void H5C_retag_copied_metadata(H5C_t *cache_ptr, haddr_t metadata_tag);
-H5_DLL herr_t H5C_unprotect(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- const H5C_class_t * type,
- haddr_t addr,
- void * thing,
- unsigned int flags);
-
-H5_DLL herr_t H5C_validate_resize_config(H5C_auto_size_ctl_t * config_ptr,
- unsigned int tests);
-
-H5_DLL herr_t H5C_ignore_tags(H5C_t * cache_ptr);
-
-H5_DLL void H5C_retag_copied_metadata(H5C_t * cache_ptr, haddr_t metadata_tag);
+#ifdef H5_HAVE_PARALLEL
+H5_DLL herr_t H5C_apply_candidate_list(H5F_t *f, hid_t dxpl_id,
+ H5C_t *cache_ptr, int num_candidates, haddr_t *candidates_list_ptr,
+ int mpi_rank, int mpi_size);
+H5_DLL herr_t H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr);
+H5_DLL herr_t H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr);
+#endif /* H5_HAVE_PARALLEL */
#ifndef NDEBUG /* debugging functions */
-
H5_DLL herr_t H5C_get_entry_ptr_from_addr(const H5F_t *f, haddr_t addr,
- void ** entry_ptr_ptr);
-
-H5_DLL herr_t H5C_verify_entry_type(const H5F_t * f, haddr_t addr,
- const H5C_class_t * expected_type,
- hbool_t * in_cache_ptr,
- hbool_t * type_ok_ptr);
-
+ void **entry_ptr_ptr);
+H5_DLL herr_t H5C_verify_entry_type(const H5F_t *f, haddr_t addr,
+ const H5C_class_t *expected_type, hbool_t *in_cache_ptr,
+ hbool_t *type_ok_ptr);
#endif /* NDEBUG */
#endif /* !_H5Cprivate_H */
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index e22eb3a..af5123c 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -722,7 +722,6 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
unsigned f_ndims; /* The number of dimensions of the file's dataspace */
int sm_ndims; /* The number of dimensions of the memory buffer's dataspace (signed) */
H5SL_node_t *curr_node; /* Current node in skip list */
- H5S_sel_type fsel_type; /* Selection type on disk */
char bogus; /* "bogus" buffer to pass to selection iterator */
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
@@ -836,13 +835,13 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
fm->use_single = FALSE;
/* Get type of selection on disk & in memory */
- if((fsel_type = H5S_GET_SELECT_TYPE(file_space)) < H5S_SEL_NONE)
+ if((fm->fsel_type = H5S_GET_SELECT_TYPE(file_space)) < H5S_SEL_NONE)
HGOTO_ERROR(H5E_DATASET, H5E_BADSELECT, FAIL, "unable to get type of selection")
if((fm->msel_type = H5S_GET_SELECT_TYPE(mem_space)) < H5S_SEL_NONE)
HGOTO_ERROR(H5E_DATASET, H5E_BADSELECT, FAIL, "unable to get type of selection")
/* If the selection is NONE or POINTS, set the flag to FALSE */
- if(fsel_type == H5S_SEL_POINTS || fsel_type == H5S_SEL_NONE)
+ if(fm->fsel_type == H5S_SEL_POINTS || fm->fsel_type == H5S_SEL_NONE)
sel_hyper_flag = FALSE;
else
sel_hyper_flag = TRUE;
@@ -2014,7 +2013,8 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
/* Determine if we will access all the data in the chunk */
if(dst_accessed_bytes != ctg_store.contig.dset_size ||
- (chunk_info->chunk_points * type_info->src_type_size) != ctg_store.contig.dset_size)
+ (chunk_info->chunk_points * type_info->src_type_size) != ctg_store.contig.dset_size ||
+ fm->fsel_type == H5S_SEL_POINTS)
entire_chunk = FALSE;
/* Set chunk's [scaled] coordinates */
diff --git a/src/H5Dlayout.c b/src/H5Dlayout.c
index cb8b27d..85882fe 100644
--- a/src/H5Dlayout.c
+++ b/src/H5Dlayout.c
@@ -262,7 +262,7 @@ H5D__layout_oh_create(H5F_t *file, hid_t dxpl_id, H5O_t *oh, H5D_t *dset,
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create EFL file name heap")
/* Pin the heap down in memory */
- if(NULL == (heap = H5HL_protect(file, dxpl_id, efl->heap_addr, H5AC_WRITE)))
+ if(NULL == (heap = H5HL_protect(file, dxpl_id, efl->heap_addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTPROTECT, FAIL, "unable to protect EFL file name heap")
/* Insert "empty" name first */
diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h
index 44441c4..6a0b15e 100644
--- a/src/H5Dpkg.h
+++ b/src/H5Dpkg.h
@@ -337,6 +337,7 @@ typedef struct H5D_chunk_map_t {
H5S_sel_iter_t mem_iter; /* Iterator for elements in memory selection */
unsigned m_ndims; /* Number of dimensions for memory dataspace */
H5S_sel_type msel_type; /* Selection type in memory */
+ H5S_sel_type fsel_type; /* Selection type in file */
H5SL_t *sel_chunks; /* Skip list containing information for each chunk selected */
diff --git a/src/H5EA.c b/src/H5EA.c
index 37682e7..d96f9c2 100644
--- a/src/H5EA.c
+++ b/src/H5EA.c
@@ -148,7 +148,7 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
H5E_THROW(H5E_CANTALLOC, "memory allocation failed for extensible array info")
/* Lock the array header into memory */
- if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, ea_addr, ctx_udata, H5AC_WRITE)))
+ if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, ea_addr, ctx_udata, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTPROTECT, "unable to load extensible array header")
/* Point extensible array wrapper at header and bump it's ref count */
@@ -209,7 +209,7 @@ H5EA_open(H5F_t *f, hid_t dxpl_id, haddr_t ea_addr, void *ctx_udata))
#ifdef QAK
HDfprintf(stderr, "%s: ea_addr = %a\n", FUNC, ea_addr);
#endif /* QAK */
- if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, ea_addr, ctx_udata, H5AC_READ)))
+ if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, ea_addr, ctx_udata, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to load extensible array header, address = %llu", (unsigned long long)ea_addr)
/* Check for pending array deletion */
@@ -333,7 +333,7 @@ END_FUNC(PRIV) /* end H5EA_get_addr() */
*/
BEGIN_FUNC(STATIC, ERR,
herr_t, SUCCEED, FAIL,
-H5EA__lookup_elmt(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx, H5AC_protect_t thing_acc,
+H5EA__lookup_elmt(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx, unsigned thing_acc,
void **thing, uint8_t **thing_elmt_buf, hsize_t *thing_elmt_idx,
H5EA__unprotect_func_t *thing_unprot_func))
@@ -362,6 +362,9 @@ HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
HDassert(thing_elmt_buf);
HDassert(thing_unprot_func);
+ /* only the H5AC__READ_ONLY_FLAG may be set in thing_acc */
+ HDassert((thing_acc & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
+
/* Set the shared array header's file context for this operation */
hdr->f = ea->f;
@@ -377,7 +380,7 @@ HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
HDfprintf(stderr, "%s: Index block address not defined!\n", FUNC, idx);
#endif /* QAK */
/* Check if we are allowed to create the thing */
- if(H5AC_WRITE == thing_acc) {
+ if(0 == (thing_acc & H5AC__READ_ONLY_FLAG)) { /* i.e. r/w access */
/* Create the index block */
hdr->idx_blk_addr = H5EA__iblock_create(hdr, dxpl_id, &stats_changed);
if(!H5F_addr_defined(hdr->idx_blk_addr))
@@ -435,7 +438,7 @@ HDfprintf(stderr, "%s: dblk_idx = %u, iblock->ndblk_addrs = %Zu\n", FUNC, dblk_i
/* Check if the data block has been allocated on disk yet */
if(!H5F_addr_defined(iblock->dblk_addrs[dblk_idx])) {
/* Check if we are allowed to create the thing */
- if(H5AC_WRITE == thing_acc) {
+ if(0 == (thing_acc & H5AC__READ_ONLY_FLAG)) { /* i.e. r/w access */
haddr_t dblk_addr; /* Address of data block created */
hsize_t dblk_off; /* Offset of data block in array */
@@ -475,7 +478,7 @@ HDfprintf(stderr, "%s: dblk_idx = %u, iblock->ndblk_addrs = %Zu\n", FUNC, dblk_i
/* Check if the super block has been allocated on disk yet */
if(!H5F_addr_defined(iblock->sblk_addrs[sblk_off])) {
/* Check if we are allowed to create the thing */
- if(H5AC_WRITE == thing_acc) {
+ if(0 == (thing_acc & H5AC__READ_ONLY_FLAG)) { /* i.e. r/w access */
haddr_t sblk_addr; /* Address of data block created */
/* Create super block */
@@ -508,7 +511,7 @@ HDfprintf(stderr, "%s: dblk_idx = %u, sblock->ndblks = %Zu\n", FUNC, dblk_idx, s
/* Check if the data block has been allocated on disk yet */
if(!H5F_addr_defined(sblock->dblk_addrs[dblk_idx])) {
/* Check if we are allowed to create the thing */
- if(H5AC_WRITE == thing_acc) {
+ if(0 == (thing_acc & H5AC__READ_ONLY_FLAG)) { /* i.e. r/w access */
haddr_t dblk_addr; /* Address of data block created */
hsize_t dblk_off; /* Offset of data block in array */
@@ -568,7 +571,7 @@ HDfprintf(stderr, "%s: sblock->dblk_page_size = %Zu\n", FUNC, sblock->dblk_page_
/* Check if page has been initialized yet */
if(!H5VM_bit_get(sblock->page_init, page_init_idx)) {
/* Check if we are allowed to create the thing */
- if(H5AC_WRITE == thing_acc) {
+ if(0 == (thing_acc & H5AC__READ_ONLY_FLAG)) { /* i.e. r/w access */
/* Create the data block page */
if(H5EA__dblk_page_create(hdr, dxpl_id, sblock, dblk_page_addr) < 0)
H5E_THROW(H5E_CANTCREATE, "unable to create data block page")
@@ -677,7 +680,7 @@ HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
hdr->f = ea->f;
/* Look up the array metadata containing the element we want to set */
- if(H5EA__lookup_elmt(ea, dxpl_id, idx, H5AC_WRITE, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0)
+ if(H5EA__lookup_elmt(ea, dxpl_id, idx, H5AC__NO_FLAGS_SET, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0)
H5E_THROW(H5E_CANTPROTECT, "unable to protect array metadata")
/* Sanity check */
@@ -762,7 +765,7 @@ HDfprintf(stderr, "%s: Index block address is: %a\n", FUNC, hdr->idx_blk_addr);
hdr->f = ea->f;
/* Look up the array metadata containing the element we want to set */
- if(H5EA__lookup_elmt(ea, dxpl_id, idx, H5AC_READ, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0)
+ if(H5EA__lookup_elmt(ea, dxpl_id, idx, H5AC__READ_ONLY_FLAG, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0)
H5E_THROW(H5E_CANTPROTECT, "unable to protect array metadata")
/* Check if the thing holding the element has been created yet */
@@ -905,7 +908,7 @@ HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
HDassert(ea);
/* Look up the array metadata containing the element we want to set */
- if(H5EA__lookup_elmt(ea, dxpl_id, idx, H5AC_WRITE, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0)
+ if(H5EA__lookup_elmt(ea, dxpl_id, idx, H5AC__NO_FLAGS_SET, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0)
H5E_THROW(H5E_CANTPROTECT, "unable to protect array metadata")
/* Sanity check */
@@ -960,7 +963,7 @@ HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
HDassert(ea);
/* Look up the array metadata containing the element we want to set */
- if(H5EA__lookup_elmt(ea, dxpl_id, idx, H5AC_READ, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0)
+ if(H5EA__lookup_elmt(ea, dxpl_id, idx, H5AC__READ_ONLY_FLAG, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0)
H5E_THROW(H5E_CANTPROTECT, "unable to protect array metadata")
/* Sanity check */
@@ -1048,7 +1051,7 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
/* Lock the array header into memory */
/* (OK to pass in NULL for callback context, since we know the header must be in the cache) */
- if(NULL == (hdr = H5EA__hdr_protect(ea->f, dxpl_id, ea_addr, NULL, H5AC_WRITE)))
+ if(NULL == (hdr = H5EA__hdr_protect(ea->f, dxpl_id, ea_addr, NULL, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTLOAD, "unable to load extensible array header")
/* Set the shared array header's file context for this operation */
@@ -1112,7 +1115,7 @@ H5EA_delete(H5F_t *f, hid_t dxpl_id, haddr_t ea_addr, void *ctx_udata))
#ifdef QAK
HDfprintf(stderr, "%s: ea_addr = %a\n", FUNC, ea_addr);
#endif /* QAK */
- if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, ea_addr, ctx_udata, H5AC_WRITE)))
+ if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, ea_addr, ctx_udata, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array header, address = %llu", (unsigned long long)ea_addr)
/* Check for files using shared array header */
diff --git a/src/H5EAcache.c b/src/H5EAcache.c
index 70686fb..0d83fb3 100644
--- a/src/H5EAcache.c
+++ b/src/H5EAcache.c
@@ -57,13 +57,6 @@
#define H5EA_SBLOCK_VERSION 0 /* Super block */
#define H5EA_DBLOCK_VERSION 0 /* Data block */
-/* Size of stack buffer for serialization buffers */
-#define H5EA_HDR_BUF_SIZE 512
-#define H5EA_IBLOCK_BUF_SIZE 512
-#define H5EA_SBLOCK_BUF_SIZE 512
-#define H5EA_DBLOCK_BUF_SIZE 512
-#define H5EA_DBLK_PAGE_BUF_SIZE 512
-
/******************/
/* Local Typedefs */
@@ -80,35 +73,59 @@
/********************/
/* Metadata cache (H5AC) callbacks */
-static H5EA_hdr_t *H5EA__cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5EA__cache_hdr_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5EA_hdr_t *hdr, unsigned * flags_ptr);
-static herr_t H5EA__cache_hdr_clear(H5F_t *f, H5EA_hdr_t *hdr, hbool_t destroy);
-static herr_t H5EA__cache_hdr_size(const H5F_t *f, const H5EA_hdr_t *hdr, size_t *size_ptr);
-static herr_t H5EA__cache_hdr_dest(H5F_t *f, H5EA_hdr_t *hdr);
-static H5EA_iblock_t *H5EA__cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5EA__cache_iblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5EA_iblock_t *iblock, unsigned * flags_ptr);
-static herr_t H5EA__cache_iblock_clear(H5F_t *f, H5EA_iblock_t *iblock, hbool_t destroy);
-static herr_t H5EA__cache_iblock_notify(H5AC_notify_action_t action, H5EA_iblock_t *iblock);
-static herr_t H5EA__cache_iblock_size(const H5F_t *f, const H5EA_iblock_t *iblock, size_t *size_ptr);
-static herr_t H5EA__cache_iblock_dest(H5F_t *f, H5EA_iblock_t *iblock);
-static H5EA_sblock_t *H5EA__cache_sblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5EA__cache_sblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5EA_sblock_t *sblock, unsigned * flags_ptr);
-static herr_t H5EA__cache_sblock_clear(H5F_t *f, H5EA_sblock_t *sblock, hbool_t destroy);
-static herr_t H5EA__cache_sblock_size(const H5F_t *f, const H5EA_sblock_t *sblock, size_t *size_ptr);
-static herr_t H5EA__cache_sblock_notify(H5AC_notify_action_t action, H5EA_sblock_t *sblock);
-static herr_t H5EA__cache_sblock_dest(H5F_t *f, H5EA_sblock_t *sblock);
-static H5EA_dblock_t *H5EA__cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5EA__cache_dblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5EA_dblock_t *dblock, unsigned * flags_ptr);
-static herr_t H5EA__cache_dblock_clear(H5F_t *f, H5EA_dblock_t *dblock, hbool_t destroy);
-static herr_t H5EA__cache_dblock_size(const H5F_t *f, const H5EA_dblock_t *dblock, size_t *size_ptr);
-static herr_t H5EA__cache_dblock_notify(H5AC_notify_action_t action, H5EA_dblock_t *dblock);
-static herr_t H5EA__cache_dblock_dest(H5F_t *f, H5EA_dblock_t *dblock);
-static H5EA_dblk_page_t *H5EA__cache_dblk_page_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5EA__cache_dblk_page_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5EA_dblk_page_t *dblk_page, unsigned * flags_ptr);
-static herr_t H5EA__cache_dblk_page_clear(H5F_t *f, H5EA_dblk_page_t *dblk_page, hbool_t destroy);
-static herr_t H5EA__cache_dblk_page_size(const H5F_t *f, const H5EA_dblk_page_t *dblk_page, size_t *size_ptr);
-static herr_t H5EA__cache_dblk_page_notify(H5AC_notify_action_t action, H5EA_dblk_page_t *dblk_page);
-static herr_t H5EA__cache_dblk_page_dest(H5F_t *f, H5EA_dblk_page_t *dblk_page);
+static herr_t H5EA__cache_hdr_get_load_size(const void *udata, size_t *image_len);
+static void *H5EA__cache_hdr_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5EA__cache_hdr_image_len(const void *thing, size_t *image_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static herr_t H5EA__cache_hdr_serialize(const H5F_t *f, void *image, size_t len,
+ void *thing);
+static herr_t H5EA__cache_hdr_free_icr(void *thing);
+
+static herr_t H5EA__cache_iblock_get_load_size(const void *udata, size_t *image_len);
+static void *H5EA__cache_iblock_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5EA__cache_iblock_image_len(const void *thing,
+ size_t *image_len, hbool_t *compressed_ptr,
+ size_t *compressed_image_len_ptr);
+static herr_t H5EA__cache_iblock_serialize(const H5F_t *f, void *image, size_t len,
+ void *thing);
+static herr_t H5EA__cache_iblock_notify(H5AC_notify_action_t action, void *thing);
+static herr_t H5EA__cache_iblock_free_icr(void *thing);
+
+static herr_t H5EA__cache_sblock_get_load_size(const void *udata, size_t *image_len);
+static void *H5EA__cache_sblock_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5EA__cache_sblock_image_len(const void *thing,
+ size_t *image_len, hbool_t *compressed_ptr,
+ size_t *compressed_image_len_ptr);
+static herr_t H5EA__cache_sblock_serialize(const H5F_t *f, void *image, size_t len,
+ void *thing);
+static herr_t H5EA__cache_sblock_notify(H5AC_notify_action_t action, void *thing);
+static herr_t H5EA__cache_sblock_free_icr(void *thing);
+
+static herr_t H5EA__cache_dblock_get_load_size(const void *udata, size_t *image_len);
+static void *H5EA__cache_dblock_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5EA__cache_dblock_image_len(const void *thing,
+ size_t *image_len, hbool_t *compressed_ptr,
+ size_t *compressed_image_len_ptr);
+static herr_t H5EA__cache_dblock_serialize(const H5F_t *f, void *image, size_t len,
+ void *thing);
+static herr_t H5EA__cache_dblock_notify(H5AC_notify_action_t action, void *thing);
+static herr_t H5EA__cache_dblock_free_icr(void *thing);
+static herr_t H5EA__cache_dblock_fsf_size(const void *thing, size_t *fsf_size);
+
+static herr_t H5EA__cache_dblk_page_get_load_size(const void *udata, size_t *image_len);
+static void *H5EA__cache_dblk_page_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5EA__cache_dblk_page_image_len(const void *thing,
+ size_t *image_len, hbool_t *compressed_ptr,
+ size_t *compressed_image_len_ptr);
+static herr_t H5EA__cache_dblk_page_serialize(const H5F_t *f, void *image, size_t len,
+ void *thing);
+static herr_t H5EA__cache_dblk_page_notify(H5AC_notify_action_t action, void *thing);
+static herr_t H5EA__cache_dblk_page_free_icr(void *thing);
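
Taken together, the prototypes above trace the metadata cache client protocol this patch moves to: the cache asks 'get_load_size' how many bytes to read, hands the image to 'deserialize', later asks 'image_len' how large the write buffer must be, has 'serialize' fill it, and calls 'free_icr' when the in-core object is evicted ('pre_serialize', 'notify', 'clear' and 'fsf_size' are optional). A minimal, self-contained sketch of that call order follows; every name in it is invented for illustration and is not part of this patch or of the H5C interface.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef int my_bool;

/* Hypothetical client object, standing in for something like H5EA_hdr_t */
typedef struct { size_t size; unsigned char payload[16]; } my_thing_t;

static int my_get_load_size(const void *udata, size_t *image_len)
{ (void)udata; *image_len = sizeof(((my_thing_t *)0)->payload); return 0; }

static void *my_deserialize(const void *image, size_t len, void *udata, my_bool *dirty)
{
    my_thing_t *thing = malloc(sizeof(*thing));

    (void)udata; *dirty = 0;
    if(thing) { thing->size = len; memcpy(thing->payload, image, len); }
    return thing;
}

static int my_image_len(const void *thing, size_t *image_len)
{ *image_len = ((const my_thing_t *)thing)->size; return 0; }

static int my_serialize(void *image, size_t len, void *thing)
{ memcpy(image, ((my_thing_t *)thing)->payload, len); return 0; }

static int my_free_icr(void *thing) { free(thing); return 0; }

/* The cache drives the callbacks roughly in this order on a miss and a later flush */
int main(void)
{
    unsigned char disk[16] = "on-disk image";   /* pretend this came from a file read */
    unsigned char out[16];
    size_t len = 0;
    my_bool dirty = 0;
    void *thing;

    my_get_load_size(NULL, &len);                       /* 1: how many bytes to read   */
    thing = my_deserialize(disk, len, NULL, &dirty);    /* 2: build in-core object     */
    if(!thing)
        return 1;

    my_image_len(thing, &len);                          /* 3: size the write buffer    */
    my_serialize(out, len, thing);                      /* 4: fill it for the cache    */
    printf("round-tripped %zu bytes\n", len);

    my_free_icr(thing);                                 /* 5: discard in-core object   */
    return 0;
}
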
/*********************/
@@ -117,57 +134,87 @@ static herr_t H5EA__cache_dblk_page_dest(H5F_t *f, H5EA_dblk_page_t *dblk_page);
/* H5EA header inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_EARRAY_HDR[1] = {{
- H5AC_EARRAY_HDR_ID,
- (H5AC_load_func_t)H5EA__cache_hdr_load,
- (H5AC_flush_func_t)H5EA__cache_hdr_flush,
- (H5AC_dest_func_t)H5EA__cache_hdr_dest,
- (H5AC_clear_func_t)H5EA__cache_hdr_clear,
- (H5AC_notify_func_t)NULL,
- (H5AC_size_func_t)H5EA__cache_hdr_size,
+ H5AC_EARRAY_HDR_ID, /* Metadata client ID */
+ "Extensible Array Header", /* Metadata client name (for debugging) */
+ H5FD_MEM_EARRAY_HDR, /* File space memory type for client */
+ H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
+ H5EA__cache_hdr_get_load_size, /* 'get_load_size' callback */
+ H5EA__cache_hdr_deserialize, /* 'deserialize' callback */
+ H5EA__cache_hdr_image_len, /* 'image_len' callback */
+ NULL, /* 'pre_serialize' callback */
+ H5EA__cache_hdr_serialize, /* 'serialize' callback */
+ NULL, /* 'notify' callback */
+ H5EA__cache_hdr_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
/* H5EA index block inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_EARRAY_IBLOCK[1] = {{
- H5AC_EARRAY_IBLOCK_ID,
- (H5AC_load_func_t)H5EA__cache_iblock_load,
- (H5AC_flush_func_t)H5EA__cache_iblock_flush,
- (H5AC_dest_func_t)H5EA__cache_iblock_dest,
- (H5AC_clear_func_t)H5EA__cache_iblock_clear,
- (H5AC_notify_func_t)H5EA__cache_iblock_notify,
- (H5AC_size_func_t)H5EA__cache_iblock_size,
+ H5AC_EARRAY_IBLOCK_ID, /* Metadata client ID */
+ "Extensible Array Index Block", /* Metadata client name (for debugging) */
+ H5FD_MEM_EARRAY_IBLOCK, /* File space memory type for client */
+ H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
+ H5EA__cache_iblock_get_load_size, /* 'get_load_size' callback */
+ H5EA__cache_iblock_deserialize, /* 'deserialize' callback */
+ H5EA__cache_iblock_image_len, /* 'image_len' callback */
+ NULL, /* 'pre_serialize' callback */
+ H5EA__cache_iblock_serialize, /* 'serialize' callback */
+ H5EA__cache_iblock_notify, /* 'notify' callback */
+ H5EA__cache_iblock_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
/* H5EA super block inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_EARRAY_SBLOCK[1] = {{
- H5AC_EARRAY_SBLOCK_ID,
- (H5AC_load_func_t)H5EA__cache_sblock_load,
- (H5AC_flush_func_t)H5EA__cache_sblock_flush,
- (H5AC_dest_func_t)H5EA__cache_sblock_dest,
- (H5AC_clear_func_t)H5EA__cache_sblock_clear,
- (H5AC_notify_func_t)H5EA__cache_sblock_notify,
- (H5AC_size_func_t)H5EA__cache_sblock_size,
+ H5AC_EARRAY_SBLOCK_ID, /* Metadata client ID */
+ "Extensible Array Super Block", /* Metadata client name (for debugging) */
+ H5FD_MEM_EARRAY_SBLOCK, /* File space memory type for client */
+ H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
+ H5EA__cache_sblock_get_load_size, /* 'get_load_size' callback */
+ H5EA__cache_sblock_deserialize, /* 'deserialize' callback */
+ H5EA__cache_sblock_image_len, /* 'image_len' callback */
+ NULL, /* 'pre_serialize' callback */
+ H5EA__cache_sblock_serialize, /* 'serialize' callback */
+ H5EA__cache_sblock_notify, /* 'notify' callback */
+ H5EA__cache_sblock_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
/* H5EA data block inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_EARRAY_DBLOCK[1] = {{
- H5AC_EARRAY_DBLOCK_ID,
- (H5AC_load_func_t)H5EA__cache_dblock_load,
- (H5AC_flush_func_t)H5EA__cache_dblock_flush,
- (H5AC_dest_func_t)H5EA__cache_dblock_dest,
- (H5AC_clear_func_t)H5EA__cache_dblock_clear,
- (H5AC_notify_func_t)H5EA__cache_dblock_notify,
- (H5AC_size_func_t)H5EA__cache_dblock_size,
+ H5AC_EARRAY_DBLOCK_ID, /* Metadata client ID */
+ "Extensible Array Data Block", /* Metadata client name (for debugging) */
+ H5FD_MEM_EARRAY_DBLOCK, /* File space memory type for client */
+ H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
+ H5EA__cache_dblock_get_load_size, /* 'get_load_size' callback */
+ H5EA__cache_dblock_deserialize, /* 'deserialize' callback */
+ H5EA__cache_dblock_image_len, /* 'image_len' callback */
+ NULL, /* 'pre_serialize' callback */
+ H5EA__cache_dblock_serialize, /* 'serialize' callback */
+ H5EA__cache_dblock_notify, /* 'notify' callback */
+ H5EA__cache_dblock_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ H5EA__cache_dblock_fsf_size, /* 'fsf_size' callback */
}};
/* H5EA data block page inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_EARRAY_DBLK_PAGE[1] = {{
- H5AC_EARRAY_DBLK_PAGE_ID,
- (H5AC_load_func_t)H5EA__cache_dblk_page_load,
- (H5AC_flush_func_t)H5EA__cache_dblk_page_flush,
- (H5AC_dest_func_t)H5EA__cache_dblk_page_dest,
- (H5AC_clear_func_t)H5EA__cache_dblk_page_clear,
- (H5AC_notify_func_t)H5EA__cache_dblk_page_notify,
- (H5AC_size_func_t)H5EA__cache_dblk_page_size,
+ H5AC_EARRAY_DBLK_PAGE_ID, /* Metadata client ID */
+ "Extensible Array Data Block Page", /* Metadata client name (for debugging) */
+ H5FD_MEM_EARRAY_DBLK_PAGE, /* File space memory type for client */
+ H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
+ H5EA__cache_dblk_page_get_load_size, /* 'get_load_size' callback */
+ H5EA__cache_dblk_page_deserialize, /* 'deserialize' callback */
+ H5EA__cache_dblk_page_image_len, /* 'image_len' callback */
+ NULL, /* 'pre_serialize' callback */
+ H5EA__cache_dblk_page_serialize, /* 'serialize' callback */
+ H5EA__cache_dblk_page_notify, /* 'notify' callback */
+ H5EA__cache_dblk_page_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
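
Of the five classes above, only the data block wires up an 'fsf_size' callback, presumably because the file space a data block owns can be larger than the image the cache reads and writes for it (for example, when the block is paged). The sketch below, with invented types, shows the shape of such a callback: it simply reports how many bytes of file space to release when the entry is evicted with its file space freed.

#include <stdio.h>

typedef struct {
    size_t image_size;  /* bytes the cache reads/writes for the entry */
    size_t file_size;   /* bytes of file space the block owns on disk */
} toy_dblock_t;

/* Report the file space to release on a "free file space" eviction;
 * this may exceed the serialized image size. */
static int toy_fsf_size(const void *_thing, size_t *fsf_size)
{
    const toy_dblock_t *dblock = (const toy_dblock_t *)_thing;

    *fsf_size = dblock->file_size;
    return 0;
}

int main(void)
{
    toy_dblock_t dblock = { 64, 4096 };   /* cached prefix vs. full block */
    size_t nbytes = 0;

    toy_fsf_size(&dblock, &nbytes);
    printf("free %zu bytes of file space\n", nbytes);
    return 0;
}
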
@@ -183,97 +230,111 @@ const H5AC_class_t H5AC_EARRAY_DBLK_PAGE[1] = {{
/*-------------------------------------------------------------------------
- * Function: H5EA__cache_hdr_load
+ * Function: H5EA__cache_hdr_get_load_size
+ *
+ * Purpose: Compute the size of the data structure on disk.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * July 16, 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(STATIC, NOERR,
+herr_t, SUCCEED, -,
+H5EA__cache_hdr_get_load_size(const void *_udata, size_t *image_len))
+
+ /* Local variables */
+ const H5EA_hdr_cache_ud_t *udata = (const H5EA_hdr_cache_ud_t *)_udata; /* User data for callback */
+
+ /* Check arguments */
+ HDassert(udata);
+ HDassert(udata->f);
+ HDassert(H5F_addr_defined(udata->addr));
+ HDassert(image_len);
+
+ /* Set the image length size */
+ *image_len = (size_t)H5EA_HEADER_SIZE_FILE(udata->f);
+
+END_FUNC(STATIC) /* end H5EA__cache_hdr_get_load_size() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5EA__cache_hdr_deserialize
*
- * Purpose: Loads an extensible array header from the disk.
+ * Purpose:	Deserializes an extensible array header from its on-disk image.
*
- * Return: Success: Pointer to a new extensible array
+ * Return:	Success:	Pointer to a new extensible array header
* Failure: NULL
*
* Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Aug 26 2008
+ * koziol@hdfgroup.org
+ * July 16, 2013
*
*-------------------------------------------------------------------------
*/
BEGIN_FUNC(STATIC, ERR,
-H5EA_hdr_t *, NULL, NULL,
-H5EA__cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata))
+void *, NULL, NULL,
+H5EA__cache_hdr_deserialize(const void *_image, size_t len, void *_udata,
+ hbool_t H5_ATTR_UNUSED *dirty))
/* Local variables */
H5EA_cls_id_t id; /* ID of extensible array class, as found in file */
H5EA_hdr_t *hdr = NULL; /* Extensible array info */
- size_t size; /* Header size */
- H5WB_t *wb = NULL; /* Wrapped buffer for header data */
- uint8_t hdr_buf[H5EA_HDR_BUF_SIZE]; /* Buffer for header */
- uint8_t *buf; /* Pointer to header buffer */
- const uint8_t *p; /* Pointer into raw data buffer */
+ H5EA_hdr_cache_ud_t *udata = (H5EA_hdr_cache_ud_t *)_udata;
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
uint32_t stored_chksum; /* Stored metadata checksum value */
uint32_t computed_chksum; /* Computed metadata checksum value */
/* Check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ HDassert(image);
+ HDassert(udata);
+ HDassert(udata->f);
+ HDassert(H5F_addr_defined(udata->addr));
/* Allocate space for the extensible array data structure */
- if(NULL == (hdr = H5EA__hdr_alloc(f)))
+ if(NULL == (hdr = H5EA__hdr_alloc(udata->f)))
H5E_THROW(H5E_CANTALLOC, "memory allocation failed for extensible array shared header")
/* Set the extensible array header's address */
- hdr->addr = addr;
-
- /* Wrap the local buffer for serialized info */
- if(NULL == (wb = H5WB_wrap(hdr_buf, sizeof(hdr_buf))))
- H5E_THROW(H5E_CANTINIT, "can't wrap buffer")
-
- /* Compute the 'base' size of the extensible array header on disk */
- size = H5EA_HEADER_SIZE_HDR(hdr);
-
- /* Get a pointer to a buffer that's large enough for serialized header */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
- H5E_THROW(H5E_CANTGET, "can't get actual buffer")
-
- /* Read header from disk */
- if(H5F_block_read(f, H5FD_MEM_EARRAY_HDR, addr, size, dxpl_id, buf) < 0)
- H5E_THROW(H5E_READERROR, "can't read extensible array header")
-
- /* Get temporary pointer to serialized header */
- p = buf;
+ hdr->addr = udata->addr;
/* Magic number */
- if(HDmemcmp(p, H5EA_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ if(HDmemcmp(image, H5EA_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC))
H5E_THROW(H5E_BADVALUE, "wrong extensible array header signature")
- p += H5_SIZEOF_MAGIC;
+ image += H5_SIZEOF_MAGIC;
/* Version */
- if(*p++ != H5EA_HDR_VERSION)
+ if(*image++ != H5EA_HDR_VERSION)
H5E_THROW(H5E_VERSION, "wrong extensible array header version")
/* Extensible array class */
- id = (H5EA_cls_id_t)*p++;
+ id = (H5EA_cls_id_t)*image++;
if(id >= H5EA_NUM_CLS_ID)
H5E_THROW(H5E_BADTYPE, "incorrect extensible array class")
hdr->cparam.cls = H5EA_client_class_g[id];
/* General array creation/configuration information */
- hdr->cparam.raw_elmt_size = *p++; /* Element size in file (in bytes) */
- hdr->cparam.max_nelmts_bits = *p++; /* Log2(Max. # of elements in array) - i.e. # of bits needed to store max. # of elements */
- hdr->cparam.idx_blk_elmts = *p++; /* # of elements to store in index block */
- hdr->cparam.data_blk_min_elmts = *p++; /* Min. # of elements per data block */
- hdr->cparam.sup_blk_min_data_ptrs = *p++; /* Min. # of data block pointers for a super block */
- hdr->cparam.max_dblk_page_nelmts_bits = *p++; /* Log2(Max. # of elements in data block page) - i.e. # of bits needed to store max. # of elements in data block page */
+ hdr->cparam.raw_elmt_size = *image++; /* Element size in file (in bytes) */
+ hdr->cparam.max_nelmts_bits = *image++; /* Log2(Max. # of elements in array) - i.e. # of bits needed to store max. # of elements */
+ hdr->cparam.idx_blk_elmts = *image++; /* # of elements to store in index block */
+ hdr->cparam.data_blk_min_elmts = *image++; /* Min. # of elements per data block */
+ hdr->cparam.sup_blk_min_data_ptrs = *image++; /* Min. # of data block pointers for a super block */
+ hdr->cparam.max_dblk_page_nelmts_bits = *image++; /* Log2(Max. # of elements in data block page) - i.e. # of bits needed to store max. # of elements in data block page */
/* Array statistics */
- hdr->stats.computed.hdr_size = size; /* Size of header in file */
- H5F_DECODE_LENGTH(f, p, hdr->stats.stored.nsuper_blks); /* Number of super blocks created */
- H5F_DECODE_LENGTH(f, p, hdr->stats.stored.super_blk_size); /* Size of super blocks created */
- H5F_DECODE_LENGTH(f, p, hdr->stats.stored.ndata_blks); /* Number of data blocks created */
- H5F_DECODE_LENGTH(f, p, hdr->stats.stored.data_blk_size); /* Size of data blocks created */
- H5F_DECODE_LENGTH(f, p, hdr->stats.stored.max_idx_set); /* Max. index set (+1) */
- H5F_DECODE_LENGTH(f, p, hdr->stats.stored.nelmts); /* Number of elements 'realized' */
+ hdr->stats.computed.hdr_size = len; /* Size of header in file */
+ H5F_DECODE_LENGTH(udata->f, image, hdr->stats.stored.nsuper_blks); /* Number of super blocks created */
+ H5F_DECODE_LENGTH(udata->f, image, hdr->stats.stored.super_blk_size); /* Size of super blocks created */
+ H5F_DECODE_LENGTH(udata->f, image, hdr->stats.stored.ndata_blks); /* Number of data blocks created */
+ H5F_DECODE_LENGTH(udata->f, image, hdr->stats.stored.data_blk_size); /* Size of data blocks created */
+ H5F_DECODE_LENGTH(udata->f, image, hdr->stats.stored.max_idx_set); /* Max. index set (+1) */
+ H5F_DECODE_LENGTH(udata->f, image, hdr->stats.stored.nelmts); /* Number of elements 'realized' */
/* Internal information */
- H5F_addr_decode(f, &p, &hdr->idx_blk_addr); /* Address of index block */
+ H5F_addr_decode(udata->f, &image, &hdr->idx_blk_addr); /* Address of index block */
/* Index block statistics */
if(H5F_addr_defined(hdr->idx_blk_addr)) {
@@ -298,26 +359,26 @@ H5EA__cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata))
/* Sanity check */
/* (allow for checksum not decoded yet) */
- HDassert((size_t)(p - buf) == (size - H5EA_SIZEOF_CHKSUM));
+ HDassert((size_t)(image - (const uint8_t *)_image) == (len - H5EA_SIZEOF_CHKSUM));
/* Compute checksum on entire header */
/* (including the filter information, if present) */
- computed_chksum = H5_checksum_metadata(buf, (size_t)(p - buf), 0);
+ computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
/* Metadata checksum */
- UINT32DECODE(p, stored_chksum);
-
- /* Sanity check */
- HDassert((size_t)(p - buf) == size);
+ UINT32DECODE(image, stored_chksum);
/* Verify checksum */
if(stored_chksum != computed_chksum)
H5E_THROW(H5E_BADVALUE, "incorrect metadata checksum for extensible array header")
+ /* Sanity check */
+ HDassert((size_t)(image - (const uint8_t *)_image) == len);
+
/* Finish initializing extensible array header */
- if(H5EA__hdr_init(hdr, udata) < 0)
+ if(H5EA__hdr_init(hdr, udata->ctx_udata) < 0)
H5E_THROW(H5E_CANTINIT, "initialization failed for extensible array header")
- HDassert(hdr->size == size);
+ HDassert(hdr->size == len);
/* Set return value */
ret_value = hdr;
@@ -325,263 +386,211 @@ H5EA__cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata))
CATCH
/* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- H5E_THROW(H5E_CLOSEERROR, "can't close wrapped buffer")
if(!ret_value)
if(hdr && H5EA__hdr_dest(hdr) < 0)
H5E_THROW(H5E_CANTFREE, "unable to destroy extensible array header")
-END_FUNC(STATIC) /* end H5EA__cache_hdr_load() */
+END_FUNC(STATIC) /* end H5EA__cache_hdr_deserialize() */
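
The deserialize routine above checksums every byte of the image except the trailing four, then compares the result against the stored value decoded from those last four bytes. A stand-alone sketch of that trailing-checksum pattern follows; it uses a toy checksum in place of H5_checksum_metadata(), so it illustrates the layout only, not the library's actual algorithm.

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for H5_checksum_metadata() -- illustration only */
static uint32_t toy_checksum(const uint8_t *buf, size_t len)
{
    uint32_t sum = 0;

    for(size_t u = 0; u < len; u++)
        sum = (sum << 1) ^ buf[u];
    return sum;
}

/* Verify an image whose last 4 bytes hold a little-endian checksum of the rest */
static int verify_image(const uint8_t *image, size_t len)
{
    uint32_t stored, computed;

    if(len < 4)
        return -1;
    computed = toy_checksum(image, len - 4);
    stored = (uint32_t)image[len - 4] | ((uint32_t)image[len - 3] << 8) |
             ((uint32_t)image[len - 2] << 16) | ((uint32_t)image[len - 1] << 24);
    return (stored == computed) ? 0 : -1;
}

int main(void)
{
    uint8_t image[20] = "header bytes....";
    uint32_t chksum = toy_checksum(image, sizeof(image) - 4);

    /* Append the checksum the same way the serializer would */
    image[16] = (uint8_t)(chksum & 0xff);
    image[17] = (uint8_t)((chksum >> 8) & 0xff);
    image[18] = (uint8_t)((chksum >> 16) & 0xff);
    image[19] = (uint8_t)((chksum >> 24) & 0xff);

    printf("checksum %s\n", verify_image(image, sizeof(image)) == 0 ? "ok" : "bad");
    return 0;
}
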
/*-------------------------------------------------------------------------
- * Function: H5EA__cache_hdr_flush
+ * Function: H5EA__cache_hdr_image_len
*
- * Purpose: Flushes a dirty extensible array header to disk.
+ * Purpose: Compute the size of the data structure on disk.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Aug 26 2008
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * July 16, 2013
*
*-------------------------------------------------------------------------
*/
-BEGIN_FUNC(STATIC, ERR,
-herr_t, SUCCEED, FAIL,
-H5EA__cache_hdr_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
- H5EA_hdr_t *hdr, unsigned H5_ATTR_UNUSED * flags_ptr))
+BEGIN_FUNC(STATIC, NOERR,
+herr_t, SUCCEED, -,
+H5EA__cache_hdr_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr))
- H5WB_t *wb = NULL; /* Wrapped buffer for header data */
- uint8_t hdr_buf[H5EA_HDR_BUF_SIZE]; /* Buffer for header */
+ /* Local variables */
+ const H5EA_hdr_t *hdr = (const H5EA_hdr_t *)_thing; /* Pointer to the object */
- /* check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ /* Check arguments */
HDassert(hdr);
+ HDassert(image_len);
- if(hdr->cache_info.is_dirty) {
- uint8_t *buf; /* Temporary raw data buffer */
- uint8_t *p; /* Pointer into raw data buffer */
- size_t size; /* Header size on disk */
- uint32_t metadata_chksum; /* Computed metadata checksum value */
-
- /* Wrap the local buffer for serialized header info */
- if(NULL == (wb = H5WB_wrap(hdr_buf, sizeof(hdr_buf))))
- H5E_THROW(H5E_CANTINIT, "can't wrap buffer")
-
- /* Compute the size of the array header on disk */
- size = hdr->size;
-
- /* Get a pointer to a buffer that's large enough for serialized header */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
- H5E_THROW(H5E_CANTGET, "can't get actual buffer")
+ /* Set the image length size */
+ *image_len = hdr->size;
- /* Get temporary pointer to serialized header */
- p = buf;
+END_FUNC(STATIC) /* end H5EA__cache_hdr_image_len() */
- /* Magic number */
- HDmemcpy(p, H5EA_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC);
- p += H5_SIZEOF_MAGIC;
-
- /* Version # */
- *p++ = H5EA_HDR_VERSION;
+
+/*-------------------------------------------------------------------------
+ * Function: H5EA__cache_hdr_serialize
+ *
+ * Purpose:	Serializes an extensible array header into the caller-supplied image buffer.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * July 16, 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(STATIC, NOERR,
+herr_t, SUCCEED, -,
+H5EA__cache_hdr_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNUSED len,
+ void *_thing))
- /* Extensible array type */
- *p++ = hdr->cparam.cls->id;
+ /* Local variables */
+ H5EA_hdr_t *hdr = (H5EA_hdr_t *)_thing; /* Pointer to the extensible array header */
+ uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t metadata_chksum; /* Computed metadata checksum value */
- /* General array creation/configuration information */
- *p++ = hdr->cparam.raw_elmt_size; /* Element size in file (in bytes) */
- *p++ = hdr->cparam.max_nelmts_bits; /* Log2(Max. # of elements in array) - i.e. # of bits needed to store max. # of elements */
- *p++ = hdr->cparam.idx_blk_elmts; /* # of elements to store in index block */
- *p++ = hdr->cparam.data_blk_min_elmts; /* Min. # of elements per data block */
- *p++ = hdr->cparam.sup_blk_min_data_ptrs; /* Min. # of data block pointers for a super block */
- *p++ = hdr->cparam.max_dblk_page_nelmts_bits; /* Log2(Max. # of elements in data block page) - i.e. # of bits needed to store max. # of elements in data block page */
+ /* check arguments */
+ HDassert(f);
+ HDassert(image);
+ HDassert(hdr);
- /* Array statistics */
- H5F_ENCODE_LENGTH(f, p, hdr->stats.stored.nsuper_blks); /* Number of super blocks created */
- H5F_ENCODE_LENGTH(f, p, hdr->stats.stored.super_blk_size); /* Size of super blocks created */
- H5F_ENCODE_LENGTH(f, p, hdr->stats.stored.ndata_blks); /* Number of data blocks created */
- H5F_ENCODE_LENGTH(f, p, hdr->stats.stored.data_blk_size); /* Size of data blocks created */
- H5F_ENCODE_LENGTH(f, p, hdr->stats.stored.max_idx_set); /* Max. index set (+1) */
- H5F_ENCODE_LENGTH(f, p, hdr->stats.stored.nelmts); /* Number of elements 'realized' */
+ /* Magic number */
+ HDmemcpy(image, H5EA_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC);
+ image += H5_SIZEOF_MAGIC;
- /* Internal information */
- H5F_addr_encode(f, &p, hdr->idx_blk_addr); /* Address of index block */
+ /* Version # */
+ *image++ = H5EA_HDR_VERSION;
- /* Compute metadata checksum */
- metadata_chksum = H5_checksum_metadata(buf, (size_t)(p - buf), 0);
+ /* Extensible array type */
+ *image++ = hdr->cparam.cls->id;
- /* Metadata checksum */
- UINT32ENCODE(p, metadata_chksum);
+ /* General array creation/configuration information */
+ *image++ = hdr->cparam.raw_elmt_size; /* Element size in file (in bytes) */
+ *image++ = hdr->cparam.max_nelmts_bits; /* Log2(Max. # of elements in array) - i.e. # of bits needed to store max. # of elements */
+ *image++ = hdr->cparam.idx_blk_elmts; /* # of elements to store in index block */
+ *image++ = hdr->cparam.data_blk_min_elmts; /* Min. # of elements per data block */
+ *image++ = hdr->cparam.sup_blk_min_data_ptrs; /* Min. # of data block pointers for a super block */
+ *image++ = hdr->cparam.max_dblk_page_nelmts_bits; /* Log2(Max. # of elements in data block page) - i.e. # of bits needed to store max. # of elements in data block page */
- /* Write the array header. */
- HDassert((size_t)(p - buf) == size);
- if(H5F_block_write(f, H5FD_MEM_EARRAY_HDR, addr, size, dxpl_id, buf) < 0)
- H5E_THROW(H5E_WRITEERROR, "unable to save extensible array header to disk")
+ /* Array statistics */
+ H5F_ENCODE_LENGTH(f, image, hdr->stats.stored.nsuper_blks); /* Number of super blocks created */
+ H5F_ENCODE_LENGTH(f, image, hdr->stats.stored.super_blk_size); /* Size of super blocks created */
+ H5F_ENCODE_LENGTH(f, image, hdr->stats.stored.ndata_blks); /* Number of data blocks created */
+ H5F_ENCODE_LENGTH(f, image, hdr->stats.stored.data_blk_size); /* Size of data blocks created */
+ H5F_ENCODE_LENGTH(f, image, hdr->stats.stored.max_idx_set); /* Max. index set (+1) */
+ H5F_ENCODE_LENGTH(f, image, hdr->stats.stored.nelmts); /* Number of elements 'realized' */
- hdr->cache_info.is_dirty = FALSE;
- } /* end if */
+ /* Internal information */
+ H5F_addr_encode(f, &image, hdr->idx_blk_addr); /* Address of index block */
- if(destroy)
- if(H5EA__cache_hdr_dest(f, hdr) < 0)
- H5E_THROW(H5E_CANTFREE, "unable to destroy extensible array header")
+ /* Compute metadata checksum */
+ metadata_chksum = H5_checksum_metadata(_image, (size_t)(image - (uint8_t *)_image), 0);
-CATCH
+ /* Metadata checksum */
+ UINT32ENCODE(image, metadata_chksum);
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- H5E_THROW(H5E_CLOSEERROR, "can't close wrapped buffer")
+ /* Sanity check */
+ HDassert((size_t)(image - (uint8_t *)_image) <= len);
-END_FUNC(STATIC) /* end H5EA__cache_hdr_flush() */
+END_FUNC(STATIC) /* end H5EA__cache_hdr_serialize() */
/*-------------------------------------------------------------------------
- * Function: H5EA__cache_hdr_clear
+ * Function: H5EA__cache_hdr_free_icr
*
- * Purpose: Mark a extensible array header in memory as non-dirty.
+ * Purpose: Destroy/release an "in core representation" of a data
+ * structure
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Aug 26 2008
+ * koziol@hdfgroup.org
+ * July 16, 2013
*
*-------------------------------------------------------------------------
*/
BEGIN_FUNC(STATIC, ERR,
herr_t, SUCCEED, FAIL,
-H5EA__cache_hdr_clear(H5F_t *f, H5EA_hdr_t *hdr, hbool_t destroy))
+H5EA__cache_hdr_free_icr(void *thing))
- /* Sanity check */
- HDassert(hdr);
-
- /* Reset the dirty flag. */
- hdr->cache_info.is_dirty = FALSE;
+ /* Check arguments */
+ HDassert(thing);
- if(destroy)
- if(H5EA__cache_hdr_dest(f, hdr) < 0)
- H5E_THROW(H5E_CANTFREE, "unable to destroy extensible array header")
+ /* Release the extensible array header */
+ if(H5EA__hdr_dest((H5EA_hdr_t *)thing) < 0)
+ H5E_THROW(H5E_CANTFREE, "can't free extensible array header")
CATCH
-END_FUNC(STATIC) /* end H5EA__cache_hdr_clear() */
+END_FUNC(STATIC) /* end H5EA__cache_hdr_free_icr() */
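
Unlike the old 'dest' callback it replaces, 'free_icr' performs no file I/O and releases no file space; it only tears down the in-core representation, with any file-space release handled by the cache itself. A toy sketch of that division of labour, using invented types:

#include <stdlib.h>

typedef struct { void *elmts; void *dblk_addrs; } toy_hdr_t;

/* Tear down the in-core representation only -- no reads, writes, or frees of file space */
static int toy_free_icr(void *_thing)
{
    toy_hdr_t *hdr = (toy_hdr_t *)_thing;

    free(hdr->elmts);        /* nested buffers first            */
    free(hdr->dblk_addrs);
    free(hdr);               /* then the object itself          */
    return 0;
}

int main(void)
{
    toy_hdr_t *hdr = calloc(1, sizeof(*hdr));

    return hdr ? toy_free_icr(hdr) : 1;
}
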
/*-------------------------------------------------------------------------
- * Function: H5EA__cache_hdr_size
+ * Function: H5EA__cache_iblock_get_load_size
*
- * Purpose: Compute the size in bytes of a extensible array header
- * on disk, and return it in *size_ptr. On failure,
- * the value of *size_ptr is undefined.
+ * Purpose: Compute the size of the data structure on disk.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Aug 26 2008
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * July 17, 2013
*
*-------------------------------------------------------------------------
*/
-/* ARGSUSED */
BEGIN_FUNC(STATIC, NOERR,
herr_t, SUCCEED, -,
-H5EA__cache_hdr_size(const H5F_t H5_ATTR_UNUSED *f, const H5EA_hdr_t *hdr,
- size_t *size_ptr))
-
- /* Sanity check */
- HDassert(f);
- HDassert(hdr);
- HDassert(size_ptr);
-
- /* Set size value */
- *size_ptr = hdr->size;
-
-END_FUNC(STATIC) /* end H5EA__cache_hdr_size() */
+H5EA__cache_iblock_get_load_size(const void *_udata, size_t *image_len))
-
-/*-------------------------------------------------------------------------
- * Function: H5EA__cache_hdr_dest
- *
- * Purpose: Destroys an extensible array header in memory.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Aug 26 2008
- *
- *-------------------------------------------------------------------------
- */
-/* ARGSUSED */
-BEGIN_FUNC(STATIC, ERR,
-herr_t, SUCCEED, FAIL,
-H5EA__cache_hdr_dest(H5F_t *f, H5EA_hdr_t *hdr))
+ /* Local variables */
+ const H5EA_hdr_t *hdr = (const H5EA_hdr_t *)_udata; /* User data for callback */
+ H5EA_iblock_t iblock; /* Fake index block for computing size */
/* Check arguments */
- HDassert(f);
HDassert(hdr);
+ HDassert(image_len);
- /* Verify that header is clean */
- HDassert(hdr->cache_info.is_dirty == FALSE);
-
- /* If we're going to free the space on disk, the address must be valid */
- HDassert(!hdr->cache_info.free_file_space_on_destroy || H5F_addr_defined(hdr->cache_info.addr));
-
- /* Check for freeing file space for extensible array header */
- if(hdr->cache_info.free_file_space_on_destroy) {
- /* Sanity check address */
- HDassert(H5F_addr_eq(hdr->addr, hdr->cache_info.addr));
-
- /* Release the space on disk */
- /* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_EARRAY_HDR, H5AC_dxpl_id, hdr->cache_info.addr, (hsize_t)hdr->size) < 0)
- H5E_THROW(H5E_CANTFREE, "unable to free extensible array header")
- } /* end if */
+ /* Set up fake index block for computing size on disk */
+ HDmemset(&iblock, 0, sizeof(iblock));
+ iblock.hdr = (H5EA_hdr_t *)hdr; /* Casting away 'const' OK - QAK */
+ iblock.nsblks = H5EA_SBLK_FIRST_IDX(hdr->cparam.sup_blk_min_data_ptrs);
+ iblock.ndblk_addrs = 2 * ((size_t)hdr->cparam.sup_blk_min_data_ptrs - 1);
+ iblock.nsblk_addrs = hdr->nsblks - iblock.nsblks;
- /* Release the extensible array header */
- if(H5EA__hdr_dest(hdr) < 0)
- H5E_THROW(H5E_CANTFREE, "can't free extensible array header")
-
-CATCH
+ /* Set the image length size */
+ *image_len = (size_t)H5EA_IBLOCK_SIZE(&iblock);
-END_FUNC(STATIC) /* end H5EA__cache_hdr_dest() */
+END_FUNC(STATIC) /* end H5EA__cache_iblock_get_load_size() */
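
The routine above sizes the on-disk image before any index block exists by filling a throwaway struct on the stack with just the fields the size macro reads. The same trick, reduced to invented names and a toy size macro, looks like this:

#include <stdio.h>
#include <string.h>

typedef struct {
    size_t nelmts;        /* elements stored directly in the block     */
    size_t ndblk_addrs;   /* data-block addresses stored in the block  */
    size_t elmt_size;     /* on-disk element size                      */
    size_t addr_size;     /* on-disk address size                      */
} fake_iblock_t;

/* Toy stand-in for a size macro such as H5EA_IBLOCK_SIZE() */
#define FAKE_IBLOCK_SIZE(ib) \
    (4 + 1 + 1 + (ib)->addr_size + \
     (ib)->nelmts * (ib)->elmt_size + \
     (ib)->ndblk_addrs * (ib)->addr_size + 4)

int main(void)
{
    fake_iblock_t iblock;

    memset(&iblock, 0, sizeof(iblock));   /* zero everything the macro may touch */
    iblock.nelmts = 4;
    iblock.ndblk_addrs = 6;
    iblock.elmt_size = 8;
    iblock.addr_size = 8;
    printf("image length = %zu bytes\n", (size_t)FAKE_IBLOCK_SIZE(&iblock));
    return 0;
}
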
/*-------------------------------------------------------------------------
- * Function: H5EA__cache_iblock_load
+ * Function: H5EA__cache_iblock_deserialize
*
- * Purpose: Loads an extensible array index block from the disk.
+ * Purpose:	Deserializes an extensible array index block from its on-disk image.
*
- * Return: Success: Pointer to a new extensible array index block
+ * Return:	Success:	Pointer to a new extensible array index block
* Failure: NULL
*
* Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Sep 9 2008
+ * koziol@hdfgroup.org
+ * July 17, 2013
*
*-------------------------------------------------------------------------
*/
BEGIN_FUNC(STATIC, ERR,
-H5EA_iblock_t *, NULL, NULL,
-H5EA__cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata))
+void *, NULL, NULL,
+H5EA__cache_iblock_deserialize(const void *_image, size_t len,
+ void *_udata, hbool_t H5_ATTR_UNUSED *dirty))
/* Local variables */
- H5EA_hdr_t *hdr = (H5EA_hdr_t *)_udata; /* Shared extensible array information */
H5EA_iblock_t *iblock = NULL; /* Index block info */
- size_t size; /* Index block size */
- H5WB_t *wb = NULL; /* Wrapped buffer for index block data */
- uint8_t iblock_buf[H5EA_IBLOCK_BUF_SIZE]; /* Buffer for index block */
- uint8_t *buf; /* Pointer to index block buffer */
- const uint8_t *p; /* Pointer into raw data buffer */
+ H5EA_hdr_t *hdr = (H5EA_hdr_t *)_udata; /* User data for callback */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
uint32_t stored_chksum; /* Stored metadata checksum value */
uint32_t computed_chksum; /* Computed metadata checksum value */
haddr_t arr_addr; /* Address of array header in the file */
size_t u; /* Local index variable */
- /* Sanity check */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ /* Check arguments */
+ HDassert(image);
HDassert(hdr);
/* Allocate the extensible array index block */
@@ -589,41 +598,23 @@ H5EA__cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata))
H5E_THROW(H5E_CANTALLOC, "memory allocation failed for extensible array index block")
/* Set the extensible array index block's address */
- iblock->addr = addr;
-
- /* Wrap the local buffer for serialized info */
- if(NULL == (wb = H5WB_wrap(iblock_buf, sizeof(iblock_buf))))
- H5E_THROW(H5E_CANTINIT, "can't wrap buffer")
-
- /* Compute the size of the extensible array index block on disk */
- size = H5EA_IBLOCK_SIZE(iblock);
-
- /* Get a pointer to a buffer that's large enough for serialized info */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
- H5E_THROW(H5E_CANTGET, "can't get actual buffer")
-
- /* Read index block from disk */
- if(H5F_block_read(f, H5FD_MEM_EARRAY_IBLOCK, addr, size, dxpl_id, buf) < 0)
- H5E_THROW(H5E_READERROR, "can't read extensible array index block")
-
- /* Get temporary pointer to serialized header */
- p = buf;
+ iblock->addr = hdr->idx_blk_addr;
/* Magic number */
- if(HDmemcmp(p, H5EA_IBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ if(HDmemcmp(image, H5EA_IBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC))
H5E_THROW(H5E_BADVALUE, "wrong extensible array index block signature")
- p += H5_SIZEOF_MAGIC;
+ image += H5_SIZEOF_MAGIC;
/* Version */
- if(*p++ != H5EA_IBLOCK_VERSION)
+ if(*image++ != H5EA_IBLOCK_VERSION)
H5E_THROW(H5E_VERSION, "wrong extensible array index block version")
/* Extensible array type */
- if(*p++ != (uint8_t)hdr->cparam.cls->id)
+ if(*image++ != (uint8_t)hdr->cparam.cls->id)
H5E_THROW(H5E_BADTYPE, "incorrect extensible array class")
/* Address of header for array that owns this block (just for file integrity checks) */
- H5F_addr_decode(f, &p, &arr_addr);
+ H5F_addr_decode(hdr->f, &image, &arr_addr);
if(H5F_addr_ne(arr_addr, hdr->addr))
H5E_THROW(H5E_BADVALUE, "wrong extensible array header address")
@@ -632,40 +623,40 @@ H5EA__cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata))
/* Decode elements in index block */
if(hdr->cparam.idx_blk_elmts > 0) {
/* Convert from raw elements on disk into native elements in memory */
- if((hdr->cparam.cls->decode)(p, iblock->elmts, (size_t)hdr->cparam.idx_blk_elmts, hdr->cb_ctx) < 0)
+ if((hdr->cparam.cls->decode)(image, iblock->elmts, (size_t)hdr->cparam.idx_blk_elmts, hdr->cb_ctx) < 0)
H5E_THROW(H5E_CANTDECODE, "can't decode extensible array index elements")
- p += (hdr->cparam.idx_blk_elmts * hdr->cparam.raw_elmt_size);
+ image += (hdr->cparam.idx_blk_elmts * hdr->cparam.raw_elmt_size);
} /* end if */
/* Decode data block addresses in index block */
if(iblock->ndblk_addrs > 0) {
/* Decode addresses of data blocks in index block */
for(u = 0; u < iblock->ndblk_addrs; u++)
- H5F_addr_decode(f, &p, &iblock->dblk_addrs[u]);
+ H5F_addr_decode(hdr->f, &image, &iblock->dblk_addrs[u]);
} /* end if */
/* Decode super block addresses in index block */
if(iblock->nsblk_addrs > 0) {
/* Decode addresses of super blocks in index block */
for(u = 0; u < iblock->nsblk_addrs; u++)
- H5F_addr_decode(f, &p, &iblock->sblk_addrs[u]);
+ H5F_addr_decode(hdr->f, &image, &iblock->sblk_addrs[u]);
} /* end if */
/* Sanity check */
/* (allow for checksum not decoded yet) */
- HDassert((size_t)(p - buf) == (size - H5EA_SIZEOF_CHKSUM));
+ HDassert((size_t)(image - (const uint8_t *)_image) == (len - H5EA_SIZEOF_CHKSUM));
/* Save the index block's size */
- iblock->size = size;
+ iblock->size = len;
/* Compute checksum on index block */
- computed_chksum = H5_checksum_metadata(buf, (size_t)(p - buf), 0);
+ computed_chksum = H5_checksum_metadata((const uint8_t *)_image, (size_t)(image - (const uint8_t *)_image), 0);
/* Metadata checksum */
- UINT32DECODE(p, stored_chksum);
+ UINT32DECODE(image, stored_chksum);
/* Sanity check */
- HDassert((size_t)(p - buf) == iblock->size);
+ HDassert((size_t)(image - (const uint8_t *)_image) == iblock->size);
/* Verify checksum */
if(stored_chksum != computed_chksum)
@@ -677,161 +668,128 @@ H5EA__cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata))
CATCH
/* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- H5E_THROW(H5E_CLOSEERROR, "can't close wrapped buffer")
if(!ret_value)
if(iblock && H5EA__iblock_dest(iblock) < 0)
H5E_THROW(H5E_CANTFREE, "unable to destroy extensible array index block")
-END_FUNC(STATIC) /* end H5EA__cache_iblock_load() */
+END_FUNC(STATIC) /* end H5EA__cache_iblock_deserialize() */
/*-------------------------------------------------------------------------
- * Function: H5EA__cache_iblock_flush
+ * Function: H5EA__cache_iblock_image_len
+ *
+ * Purpose: Compute the size of the data structure on disk.
+ *
+ * Return: Non-negative on success/Negative on failure
*
- * Purpose: Flushes a dirty extensible array index block to disk.
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * July 17, 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(STATIC, NOERR,
+herr_t, SUCCEED, -,
+H5EA__cache_iblock_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr))
+
+ /* Local variables */
+ const H5EA_iblock_t *iblock = (const H5EA_iblock_t *)_thing; /* Pointer to the object */
+
+ /* Check arguments */
+ HDassert(iblock);
+ HDassert(image_len);
+
+ /* Set the image length size */
+ *image_len = iblock->size;
+
+END_FUNC(STATIC) /* end H5EA__cache_iblock_image_len() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5EA__cache_iblock_serialize
+ *
+ * Purpose:     Serializes an extensible array index block into the caller-supplied image buffer.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Sep 9 2008
+ * koziol@hdfgroup.org
+ * July 17, 2013
*
*-------------------------------------------------------------------------
*/
BEGIN_FUNC(STATIC, ERR,
herr_t, SUCCEED, FAIL,
-H5EA__cache_iblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
- H5EA_iblock_t *iblock, unsigned H5_ATTR_UNUSED * flags_ptr))
+H5EA__cache_iblock_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNUSED len,
+ void *_thing))
/* Local variables */
- H5WB_t *wb = NULL; /* Wrapped buffer for serializing data */
- uint8_t ser_buf[H5EA_IBLOCK_BUF_SIZE]; /* Serialization buffer */
+ H5EA_iblock_t *iblock = (H5EA_iblock_t *)_thing; /* Pointer to the object to serialize */
+ uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t metadata_chksum; /* Computed metadata checksum value */
- /* Sanity check */
+ /* check arguments */
HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ HDassert(image);
HDassert(iblock);
HDassert(iblock->hdr);
- if(iblock->cache_info.is_dirty) {
- uint8_t *buf; /* Temporary raw data buffer */
- uint8_t *p; /* Pointer into raw data buffer */
- size_t size; /* Index block size on disk */
- uint32_t metadata_chksum; /* Computed metadata checksum value */
-
- /* Wrap the local buffer for serialized info */
- if(NULL == (wb = H5WB_wrap(ser_buf, sizeof(ser_buf))))
- H5E_THROW(H5E_CANTINIT, "can't wrap buffer")
-
- /* Compute the size of the index block on disk */
- size = iblock->size;
-
- /* Get a pointer to a buffer that's large enough for serialized info */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
- H5E_THROW(H5E_CANTGET, "can't get actual buffer")
-
- /* Get temporary pointer to serialized info */
- p = buf;
+ /* Get temporary pointer to serialized info */
- /* Magic number */
- HDmemcpy(p, H5EA_IBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC);
- p += H5_SIZEOF_MAGIC;
-
- /* Version # */
- *p++ = H5EA_IBLOCK_VERSION;
-
- /* Extensible array type */
- *p++ = iblock->hdr->cparam.cls->id;
-
- /* Address of array header for array which owns this block */
- H5F_addr_encode(f, &p, iblock->hdr->addr);
-
- /* Internal information */
-
- /* Encode elements in index block */
- if(iblock->hdr->cparam.idx_blk_elmts > 0) {
- /* Convert from native elements in memory into raw elements on disk */
- if((iblock->hdr->cparam.cls->encode)(p, iblock->elmts, (size_t)iblock->hdr->cparam.idx_blk_elmts, iblock->hdr->cb_ctx) < 0)
- H5E_THROW(H5E_CANTENCODE, "can't encode extensible array index elements")
- p += (iblock->hdr->cparam.idx_blk_elmts * iblock->hdr->cparam.raw_elmt_size);
- } /* end if */
-
- /* Encode data block addresses in index block */
- if(iblock->ndblk_addrs > 0) {
- size_t u; /* Local index variable */
+ /* Magic number */
+ HDmemcpy(image, H5EA_IBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC);
+ image += H5_SIZEOF_MAGIC;
- /* Encode addresses of data blocks in index block */
- for(u = 0; u < iblock->ndblk_addrs; u++)
- H5F_addr_encode(f, &p, iblock->dblk_addrs[u]);
- } /* end if */
+ /* Version # */
+ *image++ = H5EA_IBLOCK_VERSION;
- /* Encode data block addresses in index block */
- if(iblock->nsblk_addrs > 0) {
- size_t u; /* Local index variable */
+ /* Extensible array type */
+ *image++ = iblock->hdr->cparam.cls->id;
- /* Encode addresses of super blocks in index block */
- for(u = 0; u < iblock->nsblk_addrs; u++)
- H5F_addr_encode(f, &p, iblock->sblk_addrs[u]);
- } /* end if */
+ /* Address of array header for array which owns this block */
+ H5F_addr_encode(f, &image, iblock->hdr->addr);
- /* Compute metadata checksum */
- metadata_chksum = H5_checksum_metadata(buf, (size_t)(p - buf), 0);
+ /* Internal information */
- /* Metadata checksum */
- UINT32ENCODE(p, metadata_chksum);
+ /* Encode elements in index block */
+ if(iblock->hdr->cparam.idx_blk_elmts > 0) {
+ /* Convert from native elements in memory into raw elements on disk */
+ if((iblock->hdr->cparam.cls->encode)(image, iblock->elmts, (size_t)iblock->hdr->cparam.idx_blk_elmts, iblock->hdr->cb_ctx) < 0)
+ H5E_THROW(H5E_CANTENCODE, "can't encode extensible array index elements")
+ image += (iblock->hdr->cparam.idx_blk_elmts * iblock->hdr->cparam.raw_elmt_size);
+ } /* end if */
- /* Write the index block */
- HDassert((size_t)(p - buf) == size);
- if(H5F_block_write(f, H5FD_MEM_EARRAY_IBLOCK, addr, size, dxpl_id, buf) < 0)
- H5E_THROW(H5E_WRITEERROR, "unable to save extensible array index block to disk")
+ /* Encode data block addresses in index block */
+ if(iblock->ndblk_addrs > 0) {
+ size_t u; /* Local index variable */
- iblock->cache_info.is_dirty = FALSE;
+ /* Encode addresses of data blocks in index block */
+ for(u = 0; u < iblock->ndblk_addrs; u++)
+ H5F_addr_encode(f, &image, iblock->dblk_addrs[u]);
} /* end if */
- if(destroy)
- if(H5EA__cache_iblock_dest(f, iblock) < 0)
- H5E_THROW(H5E_CANTFREE, "unable to destroy extensible array index block")
-
-CATCH
+ /* Encode data block addresses in index block */
+ if(iblock->nsblk_addrs > 0) {
+ size_t u; /* Local index variable */
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- H5E_THROW(H5E_CLOSEERROR, "can't close wrapped buffer")
+ /* Encode addresses of super blocks in index block */
+ for(u = 0; u < iblock->nsblk_addrs; u++)
+ H5F_addr_encode(f, &image, iblock->sblk_addrs[u]);
+ } /* end if */
-END_FUNC(STATIC) /* end H5EA__cache_iblock_flush() */
+ /* Compute metadata checksum */
+ metadata_chksum = H5_checksum_metadata(_image, (size_t)(image - (uint8_t *)_image), 0);
-
-/*-------------------------------------------------------------------------
- * Function: H5EA__cache_iblock_clear
- *
- * Purpose: Mark a extensible array index block in memory as non-dirty.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Sept 9 2008
- *
- *-------------------------------------------------------------------------
- */
-BEGIN_FUNC(STATIC, ERR,
-herr_t, SUCCEED, FAIL,
-H5EA__cache_iblock_clear(H5F_t *f, H5EA_iblock_t *iblock, hbool_t destroy))
+ /* Metadata checksum */
+ UINT32ENCODE(image, metadata_chksum);
/* Sanity check */
- HDassert(iblock);
-
- /* Reset the dirty flag */
- iblock->cache_info.is_dirty = FALSE;
-
- if(destroy)
- if(H5EA__cache_iblock_dest(f, iblock) < 0)
- H5E_THROW(H5E_CANTFREE, "unable to destroy extensible array index block")
+ HDassert((size_t)(image - (uint8_t *)_image) <= len);
CATCH
-END_FUNC(STATIC) /* end H5EA__cache_iblock_clear() */
+END_FUNC(STATIC) /* end H5EA__cache_iblock_serialize() */
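
A serialize callback writes into a buffer whose length the cache obtained from the matching image_len callback, so it must never advance past that many bytes; the cache, not the client, performs the eventual file write. A compact sketch of that contract, with invented types and the checksum omitted:

#include <assert.h>
#include <stdint.h>
#include <string.h>

typedef struct { uint8_t version; uint64_t block_off; size_t size; } toy_block_t;

static int toy_serialize(uint8_t *image, size_t len, const toy_block_t *blk)
{
    uint8_t *p = image;

    memcpy(p, "TOYB", 4);  p += 4;                        /* signature                  */
    *p++ = blk->version;                                  /* version                    */
    memcpy(p, &blk->block_off, sizeof(blk->block_off));   /* offset (native order; sketch only) */
    p += sizeof(blk->block_off);

    assert((size_t)(p - image) <= len);                   /* must fit the reported image length */
    memset(p, 0, len - (size_t)(p - image));              /* pad any slack              */
    return 0;
}

int main(void)
{
    toy_block_t blk = { 1, 4096, 24 };
    uint8_t image[24];

    return toy_serialize(image, blk.size, &blk);
}
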
/*-------------------------------------------------------------------------
@@ -843,13 +801,16 @@ END_FUNC(STATIC) /* end H5EA__cache_iblock_clear() */
*
* Programmer: Quincey Koziol
* koziol@hdfgroup.org
- * Mar 31 2009
+ * July 17, 2013
*
*-------------------------------------------------------------------------
*/
BEGIN_FUNC(STATIC, ERR,
herr_t, SUCCEED, FAIL,
-H5EA__cache_iblock_notify(H5AC_notify_action_t action, H5EA_iblock_t *iblock))
+H5EA__cache_iblock_notify(H5AC_notify_action_t action, void *_thing))
+
+ /* Local variables */
+ H5EA_iblock_t *iblock = (H5EA_iblock_t *)_thing; /* Pointer to the object */
/* Sanity check */
HDassert(iblock);
@@ -857,11 +818,16 @@ H5EA__cache_iblock_notify(H5AC_notify_action_t action, H5EA_iblock_t *iblock))
/* Determine which action to take */
switch(action) {
case H5AC_NOTIFY_ACTION_AFTER_INSERT:
+ case H5AC_NOTIFY_ACTION_AFTER_LOAD:
/* Create flush dependency on extensible array header */
if(H5EA__create_flush_depend((H5AC_info_t *)iblock->hdr, (H5AC_info_t *)iblock) < 0)
H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency between index block and header, address = %llu", (unsigned long long)iblock->addr)
break;
+ case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
+ /* do nothing */
+ break;
+
case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
/* Destroy flush dependency on extensible array header */
if(H5EA__destroy_flush_depend((H5AC_info_t *)iblock->hdr, (H5AC_info_t *)iblock) < 0)
@@ -878,165 +844,155 @@ END_FUNC(STATIC) /* end H5EA__cache_iblock_notify() */
/*-------------------------------------------------------------------------
- * Function: H5EA__cache_iblock_size
+ * Function: H5EA__cache_iblock_free_icr
*
- * Purpose: Compute the size in bytes of a extensible array index block
- * on disk, and return it in *size_ptr. On failure,
- * the value of *size_ptr is undefined.
+ * Purpose: Destroy/release an "in core representation" of a data
+ * structure
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Sept 9 2008
+ * koziol@hdfgroup.org
+ * July 17, 2013
*
*-------------------------------------------------------------------------
*/
-/* ARGSUSED */
-BEGIN_FUNC(STATIC, NOERR,
-herr_t, SUCCEED, -,
-H5EA__cache_iblock_size(const H5F_t H5_ATTR_UNUSED *f, const H5EA_iblock_t *iblock,
- size_t *size_ptr))
+BEGIN_FUNC(STATIC, ERR,
+herr_t, SUCCEED, FAIL,
+H5EA__cache_iblock_free_icr(void *thing))
- /* Sanity check */
- HDassert(f);
- HDassert(iblock);
- HDassert(size_ptr);
+ /* Check arguments */
+ HDassert(thing);
+
+ /* Release the extensible array index block */
+ if(H5EA__iblock_dest((H5EA_iblock_t *)thing) < 0)
+ H5E_THROW(H5E_CANTFREE, "can't free extensible array index block")
- /* Set size value */
- *size_ptr = iblock->size;
+CATCH
-END_FUNC(STATIC) /* end H5EA__cache_iblock_size() */
+END_FUNC(STATIC) /* end H5EA__cache_iblock_free_icr() */
/*-------------------------------------------------------------------------
- * Function: H5EA__cache_iblock_dest
+ * Function: H5EA__cache_sblock_get_load_size
*
- * Purpose: Destroys an extensible array index block in memory.
+ * Purpose: Compute the size of the data structure on disk.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Sep 9 2008
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * July 17, 2013
*
*-------------------------------------------------------------------------
*/
-BEGIN_FUNC(STATIC, ERR,
-herr_t, SUCCEED, FAIL,
-H5EA__cache_iblock_dest(H5F_t *f, H5EA_iblock_t *iblock))
-
- /* Sanity check */
- HDassert(f);
- HDassert(iblock);
-
- /* Verify that index block is clean */
- HDassert(iblock->cache_info.is_dirty == FALSE);
-
- /* If we're going to free the space on disk, the address must be valid */
- HDassert(!iblock->cache_info.free_file_space_on_destroy || H5F_addr_defined(iblock->cache_info.addr));
+BEGIN_FUNC(STATIC, NOERR,
+herr_t, SUCCEED, -,
+H5EA__cache_sblock_get_load_size(const void *_udata, size_t *image_len))
- /* Check for freeing file space for extensible array index block */
- if(iblock->cache_info.free_file_space_on_destroy) {
- /* Sanity check address */
- HDassert(H5F_addr_eq(iblock->addr, iblock->cache_info.addr));
+ /* Local variables */
+ const H5EA_sblock_cache_ud_t *udata = (const H5EA_sblock_cache_ud_t *)_udata; /* User data */
+ H5EA_sblock_t sblock; /* Fake super block for computing size */
- /* Release the space on disk */
- /* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_EARRAY_IBLOCK, H5AC_dxpl_id, iblock->cache_info.addr, (hsize_t)iblock->size) < 0)
- H5E_THROW(H5E_CANTFREE, "unable to free extensible array index block")
+ /* Check arguments */
+ HDassert(udata);
+ HDassert(udata->hdr);
+ HDassert(udata->parent);
+ HDassert(udata->sblk_idx > 0);
+ HDassert(H5F_addr_defined(udata->sblk_addr));
+ HDassert(image_len);
+
+ /* Set up fake super block for computing size on disk */
+ /* (Note: extracted from H5EA__sblock_alloc) */
+ HDmemset(&sblock, 0, sizeof(sblock));
+ sblock.hdr = udata->hdr;
+ sblock.ndblks = udata->hdr->sblk_info[udata->sblk_idx].ndblks;
+ sblock.dblk_nelmts = udata->hdr->sblk_info[udata->sblk_idx].dblk_nelmts;
+
+ /* Check if # of elements in data blocks requires paging */
+ if(sblock.dblk_nelmts > udata->hdr->dblk_page_nelmts) {
+ /* Compute # of pages in each data block from this super block */
+ sblock.dblk_npages = sblock.dblk_nelmts / udata->hdr->dblk_page_nelmts;
+
+ /* Sanity check that we have at least 2 pages in data block */
+ HDassert(sblock.dblk_npages > 1);
+
+ /* Sanity check for integer truncation */
+ HDassert((sblock.dblk_npages * udata->hdr->dblk_page_nelmts) == sblock.dblk_nelmts);
+
+ /* Compute size of buffer for each data block's 'page init' bitmask */
+ sblock.dblk_page_init_size = ((sblock.dblk_npages) + 7) / 8;
+ HDassert(sblock.dblk_page_init_size > 0);
} /* end if */
- /* Release the index block */
- if(H5EA__iblock_dest(iblock) < 0)
- H5E_THROW(H5E_CANTFREE, "can't free extensible array index block")
-
-CATCH
+ /* Set the image length size */
+ *image_len = (size_t)H5EA_SBLOCK_SIZE(&sblock);
-END_FUNC(STATIC) /* end H5EA__cache_iblock_dest() */
+END_FUNC(STATIC) /* end H5EA__cache_sblock_get_load_size() */
/*-------------------------------------------------------------------------
- * Function: H5EA__cache_sblock_load
+ * Function: H5EA__cache_sblock_deserialize
*
- * Purpose: Loads an extensible array super block from the disk.
+ * Purpose:	Deserializes an extensible array super block from its on-disk image.
*
- * Return: Success: Pointer to a new extensible array super block
+ * Return:	Success:	Pointer to a new extensible array super block
* Failure: NULL
*
* Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Sep 30 2008
+ * koziol@hdfgroup.org
+ * July 17, 2013
*
*-------------------------------------------------------------------------
*/
BEGIN_FUNC(STATIC, ERR,
-H5EA_sblock_t *, NULL, NULL,
-H5EA__cache_sblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata))
+void *, NULL, NULL,
+H5EA__cache_sblock_deserialize(const void *_image, size_t len,
+ void *_udata, hbool_t H5_ATTR_UNUSED *dirty))
/* Local variables */
H5EA_sblock_t *sblock = NULL; /* Super block info */
- H5EA_sblock_cache_ud_t *udata = (H5EA_sblock_cache_ud_t *)_udata; /* User data for loading super block */
- size_t size; /* Super block size */
- H5WB_t *wb = NULL; /* Wrapped buffer for super block data */
- uint8_t sblock_buf[H5EA_IBLOCK_BUF_SIZE]; /* Buffer for super block */
- uint8_t *buf; /* Pointer to super block buffer */
- const uint8_t *p; /* Pointer into raw data buffer */
+ H5EA_sblock_cache_ud_t *udata = (H5EA_sblock_cache_ud_t *)_udata; /* User data */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
uint32_t stored_chksum; /* Stored metadata checksum value */
uint32_t computed_chksum; /* Computed metadata checksum value */
haddr_t arr_addr; /* Address of array header in the file */
size_t u; /* Local index variable */
/* Sanity check */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
- HDassert(udata && udata->hdr && udata->parent && udata->sblk_idx > 0);
+ HDassert(udata);
+ HDassert(udata->hdr);
+ HDassert(udata->parent);
+ HDassert(udata->sblk_idx > 0);
+ HDassert(H5F_addr_defined(udata->sblk_addr));
/* Allocate the extensible array super block */
if(NULL == (sblock = H5EA__sblock_alloc(udata->hdr, udata->parent, udata->sblk_idx)))
H5E_THROW(H5E_CANTALLOC, "memory allocation failed for extensible array super block")
/* Set the extensible array super block's address */
- sblock->addr = addr;
-
- /* Wrap the local buffer for serialized info */
- if(NULL == (wb = H5WB_wrap(sblock_buf, sizeof(sblock_buf))))
- H5E_THROW(H5E_CANTINIT, "can't wrap buffer")
-
- /* Compute the size of the extensible array super block on disk */
- size = H5EA_SBLOCK_SIZE(sblock);
-
- /* Get a pointer to a buffer that's large enough for serialized info */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
- H5E_THROW(H5E_CANTGET, "can't get actual buffer")
-
- /* Read super block from disk */
- if(H5F_block_read(f, H5FD_MEM_EARRAY_SBLOCK, addr, size, dxpl_id, buf) < 0)
- H5E_THROW(H5E_READERROR, "can't read extensible array super block")
-
- /* Get temporary pointer to serialized header */
- p = buf;
+ sblock->addr = udata->sblk_addr;
/* Magic number */
- if(HDmemcmp(p, H5EA_SBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ if(HDmemcmp(image, H5EA_SBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC))
H5E_THROW(H5E_BADVALUE, "wrong extensible array super block signature")
- p += H5_SIZEOF_MAGIC;
+ image += H5_SIZEOF_MAGIC;
/* Version */
- if(*p++ != H5EA_SBLOCK_VERSION)
+ if(*image++ != H5EA_SBLOCK_VERSION)
H5E_THROW(H5E_VERSION, "wrong extensible array super block version")
/* Extensible array type */
- if(*p++ != (uint8_t)udata->hdr->cparam.cls->id)
+ if(*image++ != (uint8_t)udata->hdr->cparam.cls->id)
H5E_THROW(H5E_BADTYPE, "incorrect extensible array class")
/* Address of header for array that owns this block (just for file integrity checks) */
- H5F_addr_decode(f, &p, &arr_addr);
+ H5F_addr_decode(udata->hdr->f, &image, &arr_addr);
if(H5F_addr_ne(arr_addr, udata->hdr->addr))
H5E_THROW(H5E_BADVALUE, "wrong extensible array header address")
/* Offset of block within the array's address space */
- UINT64DECODE_VAR(p, sblock->block_off, udata->hdr->arr_off_size);
+ UINT64DECODE_VAR(image, sblock->block_off, udata->hdr->arr_off_size);
/* Internal information */
@@ -1045,29 +1001,29 @@ H5EA__cache_sblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata))
size_t tot_page_init_size = sblock->ndblks * sblock->dblk_page_init_size; /* Compute total size of 'page init' buffer */
/* Retrieve the 'page init' bitmasks */
- HDmemcpy(sblock->page_init, p, tot_page_init_size);
- p += tot_page_init_size;
+ HDmemcpy(sblock->page_init, image, tot_page_init_size);
+ image += tot_page_init_size;
} /* end if */
/* Decode data block addresses */
for(u = 0; u < sblock->ndblks; u++)
- H5F_addr_decode(f, &p, &sblock->dblk_addrs[u]);
+ H5F_addr_decode(udata->hdr->f, &image, &sblock->dblk_addrs[u]);
/* Sanity check */
/* (allow for checksum not decoded yet) */
- HDassert((size_t)(p - buf) == (size - H5EA_SIZEOF_CHKSUM));
+ HDassert((size_t)(image - (const uint8_t *)_image) == (len - H5EA_SIZEOF_CHKSUM));
/* Save the super block's size */
- sblock->size = size;
+ sblock->size = len;
/* Compute checksum on super block */
- computed_chksum = H5_checksum_metadata(buf, (size_t)(p - buf), 0);
+ computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
/* Metadata checksum */
- UINT32DECODE(p, stored_chksum);
+ UINT32DECODE(image, stored_chksum);
/* Sanity check */
- HDassert((size_t)(p - buf) == sblock->size);
+ HDassert((size_t)(image - (const uint8_t *)_image) == sblock->size);
/* Verify checksum */
if(stored_chksum != computed_chksum)
@@ -1079,183 +1035,115 @@ H5EA__cache_sblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata))
CATCH
/* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- H5E_THROW(H5E_CLOSEERROR, "can't close wrapped buffer")
if(!ret_value)
if(sblock && H5EA__sblock_dest(sblock) < 0)
H5E_THROW(H5E_CANTFREE, "unable to destroy extensible array super block")
-END_FUNC(STATIC) /* end H5EA__cache_sblock_load() */
+END_FUNC(STATIC) /* end H5EA__cache_sblock_deserialize() */
/*-------------------------------------------------------------------------
- * Function: H5EA__cache_sblock_flush
+ * Function: H5EA__cache_sblock_image_len
*
- * Purpose: Flushes a dirty extensible array super block to disk.
+ * Purpose: Compute the size of the data structure on disk.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Sep 30 2008
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * July 17, 2013
*
*-------------------------------------------------------------------------
*/
-BEGIN_FUNC(STATIC, ERR,
-herr_t, SUCCEED, FAIL,
-H5EA__cache_sblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
- H5EA_sblock_t *sblock, unsigned H5_ATTR_UNUSED * flags_ptr))
+BEGIN_FUNC(STATIC, NOERR,
+herr_t, SUCCEED, -,
+H5EA__cache_sblock_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr))
/* Local variables */
- H5WB_t *wb = NULL; /* Wrapped buffer for serializing data */
- uint8_t ser_buf[H5EA_SBLOCK_BUF_SIZE]; /* Serialization buffer */
+ const H5EA_sblock_t *sblock = (const H5EA_sblock_t *)_thing; /* Pointer to the object */
- /* Sanity check */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ /* Check arguments */
HDassert(sblock);
- HDassert(sblock->hdr);
+ HDassert(image_len);
- if(sblock->cache_info.is_dirty) {
- uint8_t *buf; /* Temporary raw data buffer */
- uint8_t *p; /* Pointer into raw data buffer */
- size_t size; /* Index block size on disk */
- uint32_t metadata_chksum; /* Computed metadata checksum value */
- size_t u; /* Local index variable */
+ /* Set the image length size */
+ *image_len = sblock->size;
- /* Wrap the local buffer for serialized info */
- if(NULL == (wb = H5WB_wrap(ser_buf, sizeof(ser_buf))))
- H5E_THROW(H5E_CANTINIT, "can't wrap buffer")
-
- /* Compute the size of the super block on disk */
- size = sblock->size;
-
- /* Get a pointer to a buffer that's large enough for serialized info */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
- H5E_THROW(H5E_CANTGET, "can't get actual buffer")
-
- /* Get temporary pointer to serialized info */
- p = buf;
-
- /* Magic number */
- HDmemcpy(p, H5EA_SBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC);
- p += H5_SIZEOF_MAGIC;
-
- /* Version # */
- *p++ = H5EA_SBLOCK_VERSION;
-
- /* Extensible array type */
- *p++ = sblock->hdr->cparam.cls->id;
-
- /* Address of array header for array which owns this block */
- H5F_addr_encode(f, &p, sblock->hdr->addr);
-
- /* Offset of block in array */
- UINT64ENCODE_VAR(p, sblock->block_off, sblock->hdr->arr_off_size);
-
- /* Internal information */
-
- /* Check for 'page init' bitmasks for this super block */
- if(sblock->dblk_npages > 0) {
- size_t tot_page_init_size = sblock->ndblks * sblock->dblk_page_init_size; /* Compute total size of 'page init' buffer */
-
- /* Store the 'page init' bitmasks */
- HDmemcpy(p, sblock->page_init, tot_page_init_size);
- p += tot_page_init_size;
- } /* end if */
-
- /* Encode addresses of data blocks in super block */
- for(u = 0; u < sblock->ndblks; u++)
- H5F_addr_encode(f, &p, sblock->dblk_addrs[u]);
-
- /* Compute metadata checksum */
- metadata_chksum = H5_checksum_metadata(buf, (size_t)(p - buf), 0);
-
- /* Metadata checksum */
- UINT32ENCODE(p, metadata_chksum);
-
- /* Write the super block */
- HDassert((size_t)(p - buf) == size);
- if(H5F_block_write(f, H5FD_MEM_EARRAY_SBLOCK, addr, size, dxpl_id, buf) < 0)
- H5E_THROW(H5E_WRITEERROR, "unable to save extensible array super block to disk")
-
- sblock->cache_info.is_dirty = FALSE;
- } /* end if */
-
- if(destroy)
- if(H5EA__cache_sblock_dest(f, sblock) < 0)
- H5E_THROW(H5E_CANTFREE, "unable to destroy extensible array super block")
-
-CATCH
-
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- H5E_THROW(H5E_CLOSEERROR, "can't close wrapped buffer")
-
-END_FUNC(STATIC) /* end H5EA__cache_sblock_flush() */
+END_FUNC(STATIC) /* end H5EA__cache_sblock_image_len() */
/*-------------------------------------------------------------------------
- * Function: H5EA__cache_sblock_clear
+ * Function: H5EA__cache_sblock_serialize
*
- * Purpose: Mark a extensible array super block in memory as non-dirty.
+ * Purpose: Serializes a dirty object into its on-disk image.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Sept 30 2008
+ * koziol@hdfgroup.org
+ * July 17, 2013
*
*-------------------------------------------------------------------------
*/
-BEGIN_FUNC(STATIC, ERR,
-herr_t, SUCCEED, FAIL,
-H5EA__cache_sblock_clear(H5F_t *f, H5EA_sblock_t *sblock, hbool_t destroy))
+BEGIN_FUNC(STATIC, NOERR,
+herr_t, SUCCEED, -,
+H5EA__cache_sblock_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNUSED len,
+ void *_thing))
- /* Sanity check */
+ /* Local variables */
+ H5EA_sblock_t *sblock = (H5EA_sblock_t *)_thing; /* Pointer to the object to serialize */
+ uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t metadata_chksum; /* Computed metadata checksum value */
+ size_t u; /* Local index variable */
+
+ /* check arguments */
+ HDassert(f);
+ HDassert(image);
HDassert(sblock);
+ HDassert(sblock->hdr);
- /* Reset the dirty flag */
- sblock->cache_info.is_dirty = FALSE;
+ /* Magic number */
+ HDmemcpy(image, H5EA_SBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC);
+ image += H5_SIZEOF_MAGIC;
- if(destroy)
- if(H5EA__cache_sblock_dest(f, sblock) < 0)
- H5E_THROW(H5E_CANTFREE, "unable to destroy extensible array super block")
+ /* Version # */
+ *image++ = H5EA_SBLOCK_VERSION;
-CATCH
+ /* Extensible array type */
+ *image++ = sblock->hdr->cparam.cls->id;
-END_FUNC(STATIC) /* end H5EA__cache_sblock_clear() */
+ /* Address of array header for array which owns this block */
+ H5F_addr_encode(f, &image, sblock->hdr->addr);
-
-/*-------------------------------------------------------------------------
- * Function: H5EA__cache_sblock_size
- *
- * Purpose: Compute the size in bytes of a extensible array super block
- * on disk, and return it in *size_ptr. On failure,
- * the value of *size_ptr is undefined.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Sept 30 2008
- *
- *-------------------------------------------------------------------------
- */
-/* ARGSUSED */
-BEGIN_FUNC(STATIC, NOERR,
-herr_t, SUCCEED, -,
-H5EA__cache_sblock_size(const H5F_t H5_ATTR_UNUSED *f, const H5EA_sblock_t *sblock,
- size_t *size_ptr))
+ /* Offset of block in array */
+ UINT64ENCODE_VAR(image, sblock->block_off, sblock->hdr->arr_off_size);
- /* Sanity check */
- HDassert(sblock);
- HDassert(size_ptr);
+ /* Internal information */
+
+ /* Check for 'page init' bitmasks for this super block */
+ if(sblock->dblk_npages > 0) {
+ size_t tot_page_init_size = sblock->ndblks * sblock->dblk_page_init_size; /* Compute total size of 'page init' buffer */
- /* Set size value */
- *size_ptr = sblock->size;
+ /* Store the 'page init' bitmasks */
+ HDmemcpy(image, sblock->page_init, tot_page_init_size);
+ image += tot_page_init_size;
+ } /* end if */
+
+ /* Encode addresses of data blocks in super block */
+ for(u = 0; u < sblock->ndblks; u++)
+ H5F_addr_encode(f, &image, sblock->dblk_addrs[u]);
+
+ /* Compute metadata checksum */
+ metadata_chksum = H5_checksum_metadata(_image, (size_t)(image - (uint8_t *)_image), 0);
+
+ /* Metadata checksum */
+ UINT32ENCODE(image, metadata_chksum);
+
+ /* Sanity check */
+ HDassert((size_t)(image - (uint8_t *)_image) <= len);
-END_FUNC(STATIC) /* end H5EA__cache_sblock_size() */
+END_FUNC(STATIC) /* end H5EA__cache_sblock_serialize() */
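The serialize callback above writes every field ahead of a four-byte checksum trailer, checksums the image up to that trailer, and the matching deserialize path recomputes the checksum over the same span before comparing it with the stored value. A minimal, self-contained sketch of that trailer pattern follows; the two-field payload and the toy checksum routine are stand-ins for the real super block layout and for H5_checksum_metadata, not the actual HDF5 code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CHKSUM_SIZE 4   /* trailer size, mirrors H5EA_SIZEOF_CHKSUM */

/* Toy checksum standing in for H5_checksum_metadata() */
static uint32_t toy_checksum(const uint8_t *buf, size_t len)
{
    uint32_t sum = 0;
    while (len--)
        sum = (sum << 1) + *buf++;
    return sum;
}

/* Serialize: payload first, checksum over everything before the trailer */
static size_t encode(uint8_t *image, const char *magic, uint64_t block_off)
{
    uint8_t *p = image;
    uint32_t chksum;

    memcpy(p, magic, 4);              p += 4;            /* signature */
    memcpy(p, &block_off, 8);         p += 8;            /* offset of block in array */
    chksum = toy_checksum(image, (size_t)(p - image));
    memcpy(p, &chksum, CHKSUM_SIZE);  p += CHKSUM_SIZE;  /* trailer */
    return (size_t)(p - image);
}

/* Deserialize: recompute over the same span, then compare with the trailer */
static int decode(const uint8_t *image, size_t len)
{
    uint32_t stored, computed;

    computed = toy_checksum(image, len - CHKSUM_SIZE);
    memcpy(&stored, image + len - CHKSUM_SIZE, CHKSUM_SIZE);
    return stored == computed;
}

int main(void)
{
    uint8_t image[16];
    size_t len = encode(image, "EASB", 1024u);

    assert(len == sizeof(image));
    printf("checksum %s\n", decode(image, len) ? "ok" : "mismatch");
    return 0;
}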
/*-------------------------------------------------------------------------
@@ -1273,7 +1161,10 @@ END_FUNC(STATIC) /* end H5EA__cache_sblock_size() */
*/
BEGIN_FUNC(STATIC, ERR,
herr_t, SUCCEED, FAIL,
-H5EA__cache_sblock_notify(H5AC_notify_action_t action, H5EA_sblock_t *sblock))
+H5EA__cache_sblock_notify(H5AC_notify_action_t action, void *_thing))
+
+ /* Local variables */
+ H5EA_sblock_t *sblock = (H5EA_sblock_t *)_thing; /* Pointer to the object */
/* Sanity check */
HDassert(sblock);
@@ -1281,11 +1172,16 @@ H5EA__cache_sblock_notify(H5AC_notify_action_t action, H5EA_sblock_t *sblock))
/* Determine which action to take */
switch(action) {
case H5AC_NOTIFY_ACTION_AFTER_INSERT:
+ case H5AC_NOTIFY_ACTION_AFTER_LOAD:
/* Create flush dependency on index block */
if(H5EA__create_flush_depend((H5AC_info_t *)sblock->parent, (H5AC_info_t *)sblock) < 0)
H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency between super block and index block, address = %llu", (unsigned long long)sblock->addr)
break;
+ case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
+ /* do nothing */
+ break;
+
case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
/* Destroy flush dependency on index block */
if(H5EA__destroy_flush_depend((H5AC_info_t *)sblock->parent, (H5AC_info_t *)sblock) < 0)
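The notify hunks in this file extend the existing switch so that H5AC_NOTIFY_ACTION_AFTER_LOAD sets up the same flush dependency as AFTER_INSERT, AFTER_FLUSH is explicitly a no-op, and BEFORE_EVICT still tears the dependency down. A minimal sketch of that dispatch pattern follows; the toy_action enum and handler are illustrative only and do not use the real H5AC types.

#include <stdio.h>

/* Toy action set echoing the H5AC notify actions handled above */
enum toy_action {
    TOY_AFTER_INSERT,
    TOY_AFTER_LOAD,
    TOY_AFTER_FLUSH,
    TOY_BEFORE_EVICT
};

static int toy_notify(enum toy_action action)
{
    switch (action) {
        case TOY_AFTER_INSERT:
        case TOY_AFTER_LOAD:
            /* both paths tie the child entry to its parent for flushing */
            puts("create flush dependency on parent");
            break;

        case TOY_AFTER_FLUSH:
            /* nothing to do once the entry has been written */
            break;

        case TOY_BEFORE_EVICT:
            puts("destroy flush dependency on parent");
            break;

        default:
            return -1;   /* unknown action */
    }
    return 0;
}

int main(void)
{
    toy_notify(TOY_AFTER_LOAD);
    toy_notify(TOY_BEFORE_EVICT);
    return 0;
}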
@@ -1302,135 +1198,158 @@ END_FUNC(STATIC) /* end H5EA__cache_sblock_notify() */
/*-------------------------------------------------------------------------
- * Function: H5EA__cache_sblock_dest
+ * Function: H5EA__cache_sblock_free_icr
*
- * Purpose: Destroys an extensible array super block in memory.
+ * Purpose: Destroy/release an "in core representation" of a data
+ * structure
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Sep 30 2008
+ * koziol@hdfgroup.org
+ * July 17, 2013
*
*-------------------------------------------------------------------------
*/
BEGIN_FUNC(STATIC, ERR,
herr_t, SUCCEED, FAIL,
-H5EA__cache_sblock_dest(H5F_t *f, H5EA_sblock_t *sblock))
+H5EA__cache_sblock_free_icr(void *thing))
- /* Sanity check */
- HDassert(f);
- HDassert(sblock);
+ /* Check arguments */
+ HDassert(thing);
- /* Verify that super block is clean */
- HDassert(sblock->cache_info.is_dirty == FALSE);
+ /* Release the extensible array super block */
+ if(H5EA__sblock_dest((H5EA_sblock_t *)thing) < 0)
+ H5E_THROW(H5E_CANTFREE, "can't free extensible array super block")
- /* If we're going to free the space on disk, the address must be valid */
- HDassert(!sblock->cache_info.free_file_space_on_destroy || H5F_addr_defined(sblock->cache_info.addr));
+CATCH
- /* Check for freeing file space for extensible array super block */
- if(sblock->cache_info.free_file_space_on_destroy) {
- /* Sanity check address */
- HDassert(H5F_addr_eq(sblock->addr, sblock->cache_info.addr));
+END_FUNC(STATIC) /* end H5EA__cache_sblock_free_icr() */
- /* Release the space on disk */
- /* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_EARRAY_SBLOCK, H5AC_dxpl_id, sblock->cache_info.addr, (hsize_t)sblock->size) < 0)
- H5E_THROW(H5E_CANTFREE, "unable to free extensible array super block")
- } /* end if */
+
+/*-------------------------------------------------------------------------
+ * Function: H5EA__cache_dblock_get_load_size
+ *
+ * Purpose: Compute the size of the data structure on disk.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * July 17, 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(STATIC, NOERR,
+herr_t, SUCCEED, -,
+H5EA__cache_dblock_get_load_size(const void *_udata, size_t *image_len))
- /* Release the super block */
- if(H5EA__sblock_dest(sblock) < 0)
- H5E_THROW(H5E_CANTFREE, "can't free extensible array super block")
+ /* Local variables */
+ const H5EA_dblock_cache_ud_t *udata = (const H5EA_dblock_cache_ud_t *)_udata; /* User data */
+ H5EA_dblock_t dblock; /* Fake data block for computing size */
-CATCH
+ /* Check arguments */
+ HDassert(udata);
+ HDassert(udata->hdr);
+ HDassert(udata->parent);
+ HDassert(udata->nelmts > 0);
+ HDassert(image_len);
+
+ /* Set up fake data block for computing size on disk */
+ /* (Note: extracted from H5EA__dblock_alloc) */
+ HDmemset(&dblock, 0, sizeof(dblock));
+
+ /* need to set:
+ *
+ * dblock.hdr
+ * dblock.npages
+ * dblock.nelmts
+ *
+ * before we invoke either H5EA_DBLOCK_PREFIX_SIZE() or
+ * H5EA_DBLOCK_SIZE().
+ */
+ dblock.hdr = udata->hdr;
+ dblock.nelmts = udata->nelmts;
+
+ if(udata->nelmts > udata->hdr->dblk_page_nelmts) {
+ /* Set the # of pages in the direct block */
+ dblock.npages = udata->nelmts / udata->hdr->dblk_page_nelmts;
+ HDassert(udata->nelmts == (dblock.npages * udata->hdr->dblk_page_nelmts));
+ } /* end if */
-END_FUNC(STATIC) /* end H5EA__cache_sblock_dest() */
+ /* Set the image length size */
+ if(!dblock.npages)
+ *image_len = H5EA_DBLOCK_SIZE(&dblock);
+ else
+ *image_len = H5EA_DBLOCK_PREFIX_SIZE(&dblock);
+
+END_FUNC(STATIC) /* end H5EA__cache_dblock_get_load_size() */
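H5EA__cache_dblock_get_load_size above sizes the initial read by filling a throwaway H5EA_dblock_t with just the header pointer, element count, and page count, then choosing the prefix size for paged blocks and the full block size otherwise. The sketch below walks through that branch in isolation; the PREFIX_SIZE and RAW_ELMT_SIZE constants are assumed placeholders for H5EA_DBLOCK_PREFIX_SIZE() and the header's raw element size, not the real macros.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Placeholder formulas; the real values come from H5EA_DBLOCK_SIZE()
 * and H5EA_DBLOCK_PREFIX_SIZE(), which depend on the array header. */
#define PREFIX_SIZE    26u      /* header/prefix bytes (assumed) */
#define RAW_ELMT_SIZE  8u       /* bytes per raw element (assumed) */

static size_t dblock_load_size(size_t nelmts, size_t dblk_page_nelmts)
{
    size_t npages = 0;

    /* Same branch as the callback: only blocks larger than one page
     * are split into pages, and then only the prefix is read up front. */
    if (nelmts > dblk_page_nelmts) {
        npages = nelmts / dblk_page_nelmts;
        assert(nelmts == npages * dblk_page_nelmts);
    }

    return npages ? PREFIX_SIZE : PREFIX_SIZE + nelmts * RAW_ELMT_SIZE;
}

int main(void)
{
    /* Unpaged: 512 elements fit in one 1024-element page -> whole block read */
    printf("unpaged load size: %zu\n", dblock_load_size(512, 1024));
    /* Paged: 4096 elements over 1024-element pages -> prefix only */
    printf("paged   load size: %zu\n", dblock_load_size(4096, 1024));
    return 0;
}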
/*-------------------------------------------------------------------------
- * Function: H5EA__cache_dblock_load
+ * Function: H5EA__cache_dblock_deserialize
*
- * Purpose: Loads an extensible array data block from the disk.
+ * Purpose: Loads a data structure from the disk.
*
- * Return: Success: Pointer to a new extensible array data block
+ * Return: Success: Pointer to a new extensible array data block
* Failure: NULL
*
* Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Sep 16 2008
+ * koziol@hdfgroup.org
+ * July 17, 2013
*
*-------------------------------------------------------------------------
*/
BEGIN_FUNC(STATIC, ERR,
-H5EA_dblock_t *, NULL, NULL,
-H5EA__cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata))
+void *, NULL, NULL,
+H5EA__cache_dblock_deserialize(const void *_image, size_t len,
+ void *_udata, hbool_t H5_ATTR_UNUSED *dirty))
/* Local variables */
H5EA_dblock_t *dblock = NULL; /* Data block info */
- H5EA_dblock_cache_ud_t *udata = (H5EA_dblock_cache_ud_t *)_udata; /* User data for loading data block */
- size_t size; /* Data block size */
- H5WB_t *wb = NULL; /* Wrapped buffer for data block data */
- uint8_t dblock_buf[H5EA_DBLOCK_BUF_SIZE]; /* Buffer for data block */
- uint8_t *buf; /* Pointer to data block buffer */
- const uint8_t *p; /* Pointer into raw data buffer */
+ H5EA_dblock_cache_ud_t *udata = (H5EA_dblock_cache_ud_t *)_udata; /* User data */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
uint32_t stored_chksum; /* Stored metadata checksum value */
uint32_t computed_chksum; /* Computed metadata checksum value */
haddr_t arr_addr; /* Address of array header in the file */
- /* Sanity check */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
- HDassert(udata && udata->hdr && udata->parent && udata->nelmts > 0);
+ /* Check arguments */
+ HDassert(udata);
+ HDassert(udata->hdr);
+ HDassert(udata->parent);
+ HDassert(udata->nelmts > 0);
+ HDassert(H5F_addr_defined(udata->dblk_addr));
/* Allocate the extensible array data block */
if(NULL == (dblock = H5EA__dblock_alloc(udata->hdr, udata->parent, udata->nelmts)))
H5E_THROW(H5E_CANTALLOC, "memory allocation failed for extensible array data block")
- /* Set the extensible array data block's information */
- dblock->addr = addr;
-
- /* Wrap the local buffer for serialized info */
- if(NULL == (wb = H5WB_wrap(dblock_buf, sizeof(dblock_buf))))
- H5E_THROW(H5E_CANTINIT, "can't wrap buffer")
+ HDassert(((!dblock->npages) && (len == H5EA_DBLOCK_SIZE(dblock))) ||
+ (len == H5EA_DBLOCK_PREFIX_SIZE(dblock)));
- /* Compute the size of the extensible array data block on disk */
- if(!dblock->npages)
- size = H5EA_DBLOCK_SIZE(dblock);
- else
- size = H5EA_DBLOCK_PREFIX_SIZE(dblock);
-
- /* Get a pointer to a buffer that's large enough for serialized info */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
- H5E_THROW(H5E_CANTGET, "can't get actual buffer")
-
- /* Read data block from disk */
- if(H5F_block_read(f, H5FD_MEM_EARRAY_DBLOCK, addr, size, dxpl_id, buf) < 0)
- H5E_THROW(H5E_READERROR, "can't read extensible array data block")
-
- /* Get temporary pointer to serialized header */
- p = buf;
+ /* Set the extensible array data block's information */
+ dblock->addr = udata->dblk_addr;
/* Magic number */
- if(HDmemcmp(p, H5EA_DBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ if(HDmemcmp(image, H5EA_DBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC))
H5E_THROW(H5E_BADVALUE, "wrong extensible array data block signature")
- p += H5_SIZEOF_MAGIC;
+ image += H5_SIZEOF_MAGIC;
/* Version */
- if(*p++ != H5EA_DBLOCK_VERSION)
+ if(*image++ != H5EA_DBLOCK_VERSION)
H5E_THROW(H5E_VERSION, "wrong extensible array data block version")
/* Extensible array type */
- if(*p++ != (uint8_t)udata->hdr->cparam.cls->id)
+ if(*image++ != (uint8_t)udata->hdr->cparam.cls->id)
H5E_THROW(H5E_BADTYPE, "incorrect extensible array class")
/* Address of header for array that owns this block (just for file integrity checks) */
- H5F_addr_decode(f, &p, &arr_addr);
+ H5F_addr_decode(udata->hdr->f, &image, &arr_addr);
if(H5F_addr_ne(arr_addr, udata->hdr->addr))
H5E_THROW(H5E_BADVALUE, "wrong extensible array header address")
/* Offset of block within the array's address space */
- UINT64DECODE_VAR(p, dblock->block_off, udata->hdr->arr_off_size);
+ UINT64DECODE_VAR(image, dblock->block_off, udata->hdr->arr_off_size);
/* Internal information */
@@ -1438,26 +1357,27 @@ H5EA__cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata))
if(!dblock->npages) {
/* Decode elements in data block */
/* Convert from raw elements on disk into native elements in memory */
- if((udata->hdr->cparam.cls->decode)(p, dblock->elmts, udata->nelmts, udata->hdr->cb_ctx) < 0)
+ if((udata->hdr->cparam.cls->decode)(image, dblock->elmts, udata->nelmts, udata->hdr->cb_ctx) < 0)
H5E_THROW(H5E_CANTDECODE, "can't decode extensible array data elements")
- p += (udata->nelmts * udata->hdr->cparam.raw_elmt_size);
+ image += (udata->nelmts * udata->hdr->cparam.raw_elmt_size);
} /* end if */
/* Sanity check */
/* (allow for checksum not decoded yet) */
- HDassert((size_t)(p - buf) == (size - H5EA_SIZEOF_CHKSUM));
+ HDassert((size_t)(image - (const uint8_t *)_image) == (len - H5EA_SIZEOF_CHKSUM));
/* Set the data block's size */
+ /* (Note: This is not the same as the image length, for paged data blocks) */
dblock->size = H5EA_DBLOCK_SIZE(dblock);
/* Compute checksum on data block */
- computed_chksum = H5_checksum_metadata(buf, (size_t)(p - buf), 0);
+ computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
/* Metadata checksum */
- UINT32DECODE(p, stored_chksum);
+ UINT32DECODE(image, stored_chksum);
/* Sanity check */
- HDassert((size_t)(p - buf) == size);
+ HDassert((size_t)(image - (const uint8_t *)_image) == len);
/* Verify checksum */
if(stored_chksum != computed_chksum)
@@ -1469,151 +1389,116 @@ H5EA__cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata))
CATCH
/* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- H5E_THROW(H5E_CLOSEERROR, "can't close wrapped buffer")
if(!ret_value)
if(dblock && H5EA__dblock_dest(dblock) < 0)
H5E_THROW(H5E_CANTFREE, "unable to destroy extensible array data block")
-END_FUNC(STATIC) /* end H5EA__cache_dblock_load() */
+END_FUNC(STATIC) /* end H5EA__cache_dblock_deserialize() */
/*-------------------------------------------------------------------------
- * Function: H5EA__cache_dblock_flush
+ * Function: H5EA__cache_dblock_image_len
*
- * Purpose: Flushes a dirty extensible array data block to disk.
+ * Purpose: Compute the size of the data structure on disk.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Sep 18 2008
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * July 17, 2013
*
*-------------------------------------------------------------------------
*/
-BEGIN_FUNC(STATIC, ERR,
-herr_t, SUCCEED, FAIL,
-H5EA__cache_dblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
- H5EA_dblock_t *dblock, unsigned H5_ATTR_UNUSED * flags_ptr))
+BEGIN_FUNC(STATIC, NOERR,
+herr_t, SUCCEED, -,
+H5EA__cache_dblock_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr))
/* Local variables */
- H5WB_t *wb = NULL; /* Wrapped buffer for serializing data */
- uint8_t ser_buf[H5EA_DBLOCK_BUF_SIZE]; /* Serialization buffer */
+ const H5EA_dblock_t *dblock = (const H5EA_dblock_t *)_thing; /* Pointer to the object */
- /* Sanity check */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ /* Check arguments */
HDassert(dblock);
- HDassert(dblock->hdr);
+ HDassert(image_len);
- if(dblock->cache_info.is_dirty) {
- uint8_t *buf; /* Temporary raw data buffer */
- uint8_t *p; /* Pointer into raw data buffer */
- size_t size; /* Index block size on disk */
- uint32_t metadata_chksum; /* Computed metadata checksum value */
-
- /* Wrap the local buffer for serialized info */
- if(NULL == (wb = H5WB_wrap(ser_buf, sizeof(ser_buf))))
- H5E_THROW(H5E_CANTINIT, "can't wrap buffer")
-
- /* Compute the size of the data block on disk */
- if(!dblock->npages)
- size = dblock->size;
- else
- size = H5EA_DBLOCK_PREFIX_SIZE(dblock);
-
- /* Get a pointer to a buffer that's large enough for serialized info */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
- H5E_THROW(H5E_CANTGET, "can't get actual buffer")
-
- /* Get temporary pointer to serialized info */
- p = buf;
-
- /* Magic number */
- HDmemcpy(p, H5EA_DBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC);
- p += H5_SIZEOF_MAGIC;
-
- /* Version # */
- *p++ = H5EA_DBLOCK_VERSION;
-
- /* Extensible array type */
- *p++ = dblock->hdr->cparam.cls->id;
-
- /* Address of array header for array which owns this block */
- H5F_addr_encode(f, &p, dblock->hdr->addr);
-
- /* Offset of block in array */
- UINT64ENCODE_VAR(p, dblock->block_off, dblock->hdr->arr_off_size);
-
- /* Internal information */
-
- /* Only encode elements if the data block is not paged */
- if(!dblock->npages) {
- /* Encode elements in data block */
-
- /* Convert from native elements in memory into raw elements on disk */
- if((dblock->hdr->cparam.cls->encode)(p, dblock->elmts, dblock->nelmts, dblock->hdr->cb_ctx) < 0)
- H5E_THROW(H5E_CANTENCODE, "can't encode extensible array data elements")
- p += (dblock->nelmts * dblock->hdr->cparam.raw_elmt_size);
- } /* end if */
-
- /* Compute metadata checksum */
- metadata_chksum = H5_checksum_metadata(buf, (size_t)(p - buf), 0);
-
- /* Metadata checksum */
- UINT32ENCODE(p, metadata_chksum);
-
- /* Write the data block */
- HDassert((size_t)(p - buf) == size);
- if(H5F_block_write(f, H5FD_MEM_EARRAY_DBLOCK, addr, size, dxpl_id, buf) < 0)
- H5E_THROW(H5E_WRITEERROR, "unable to save extensible array data block to disk")
-
- dblock->cache_info.is_dirty = FALSE;
- } /* end if */
-
- if(destroy)
- if(H5EA__cache_dblock_dest(f, dblock) < 0)
- H5E_THROW(H5E_CANTFREE, "unable to destroy extensible array data block")
-
-CATCH
-
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- H5E_THROW(H5E_CLOSEERROR, "can't close wrapped buffer")
+ /* Set the image length size */
+ if(!dblock->npages)
+ *image_len = dblock->size;
+ else
+ *image_len = (size_t)H5EA_DBLOCK_PREFIX_SIZE(dblock);
-END_FUNC(STATIC) /* end H5EA__cache_dblock_flush() */
+END_FUNC(STATIC) /* end H5EA__cache_dblock_image_len() */
/*-------------------------------------------------------------------------
- * Function: H5EA__cache_dblock_clear
+ * Function: H5EA__cache_dblock_serialize
*
- * Purpose: Mark a extensible array data block in memory as non-dirty.
+ * Purpose: Serializes a dirty object into its on-disk image.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Sept 18 2008
+ * koziol@hdfgroup.org
+ * July 17, 2013
*
*-------------------------------------------------------------------------
*/
BEGIN_FUNC(STATIC, ERR,
herr_t, SUCCEED, FAIL,
-H5EA__cache_dblock_clear(H5F_t *f, H5EA_dblock_t *dblock, hbool_t destroy))
+H5EA__cache_dblock_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNUSED len,
+ void *_thing))
- /* Sanity check */
+ /* Local variables */
+ H5EA_dblock_t *dblock = (H5EA_dblock_t *)_thing; /* Pointer to the object to serialize */
+ uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t metadata_chksum; /* Computed metadata checksum value */
+
+ /* check arguments */
+ HDassert(f);
+ HDassert(image);
HDassert(dblock);
+ HDassert(dblock->hdr);
- /* Reset the dirty flag */
- dblock->cache_info.is_dirty = FALSE;
+ /* Magic number */
+ HDmemcpy(image, H5EA_DBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC);
+ image += H5_SIZEOF_MAGIC;
- if(destroy)
- if(H5EA__cache_dblock_dest(f, dblock) < 0)
- H5E_THROW(H5E_CANTFREE, "unable to destroy extensible array data block")
+ /* Version # */
+ *image++ = H5EA_DBLOCK_VERSION;
+
+ /* Extensible array type */
+ *image++ = dblock->hdr->cparam.cls->id;
+
+ /* Address of array header for array which owns this block */
+ H5F_addr_encode(f, &image, dblock->hdr->addr);
+
+ /* Offset of block in array */
+ UINT64ENCODE_VAR(image, dblock->block_off, dblock->hdr->arr_off_size);
+
+ /* Internal information */
+
+ /* Only encode elements if the data block is not paged */
+ if(!dblock->npages) {
+ /* Encode elements in data block */
+
+ /* Convert from native elements in memory into raw elements on disk */
+ if((dblock->hdr->cparam.cls->encode)(image, dblock->elmts, dblock->nelmts, dblock->hdr->cb_ctx) < 0)
+ H5E_THROW(H5E_CANTENCODE, "can't encode extensible array data elements")
+ image += (dblock->nelmts * dblock->hdr->cparam.raw_elmt_size);
+ } /* end if */
+
+ /* Compute metadata checksum */
+ metadata_chksum = H5_checksum_metadata(_image, (size_t)(image - (uint8_t *)_image), 0);
+
+ /* Metadata checksum */
+ UINT32ENCODE(image, metadata_chksum);
+
+ /* Sanity check */
+ HDassert((size_t)(image - (uint8_t *)_image) <= len);
CATCH
-END_FUNC(STATIC) /* end H5EA__cache_dblock_clear() */
+END_FUNC(STATIC) /* end H5EA__cache_dblock_serialize() */
/*-------------------------------------------------------------------------
@@ -1631,19 +1516,27 @@ END_FUNC(STATIC) /* end H5EA__cache_dblock_clear() */
*/
BEGIN_FUNC(STATIC, ERR,
herr_t, SUCCEED, FAIL,
-H5EA__cache_dblock_notify(H5AC_notify_action_t action, H5EA_dblock_t *dblock))
+H5EA__cache_dblock_notify(H5AC_notify_action_t action, void *_thing))
- /* Sanity check */
+ /* Local variables */
+ H5EA_dblock_t *dblock = (H5EA_dblock_t *)_thing; /* Pointer to the object */
+
+ /* Check arguments */
HDassert(dblock);
/* Determine which action to take */
switch(action) {
case H5AC_NOTIFY_ACTION_AFTER_INSERT:
+ case H5AC_NOTIFY_ACTION_AFTER_LOAD:
/* Create flush dependency on parent */
if(H5EA__create_flush_depend((H5AC_info_t *)dblock->parent, (H5AC_info_t *)dblock) < 0)
H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency between data block and parent, address = %llu", (unsigned long long)dblock->addr)
break;
+ case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
+ /* do nothing */
+ break;
+
case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
/* Destroy flush dependency on parent */
if(H5EA__destroy_flush_depend((H5AC_info_t *)dblock->parent, (H5AC_info_t *)dblock) < 0)
@@ -1660,171 +1553,175 @@ END_FUNC(STATIC) /* end H5EA__cache_dblock_notify() */
/*-------------------------------------------------------------------------
- * Function: H5EA__cache_dblock_size
+ * Function: H5EA__cache_dblock_free_icr
*
- * Purpose: Compute the size in bytes of a extensible array data block
- * on disk, and return it in *size_ptr. On failure,
- * the value of *size_ptr is undefined.
+ * Purpose: Destroy/release an "in core representation" of a data
+ * structure
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Sept 18 2008
+ * koziol@hdfgroup.org
+ * July 17, 2013
*
*-------------------------------------------------------------------------
*/
-/* ARGSUSED */
-BEGIN_FUNC(STATIC, NOERR,
-herr_t, SUCCEED, -,
-H5EA__cache_dblock_size(const H5F_t H5_ATTR_UNUSED *f, const H5EA_dblock_t *dblock,
- size_t *size_ptr))
+BEGIN_FUNC(STATIC, ERR,
+herr_t, SUCCEED, FAIL,
+H5EA__cache_dblock_free_icr(void *thing))
- /* Sanity check */
- HDassert(f);
- HDassert(dblock);
- HDassert(size_ptr);
+ /* Check arguments */
+ HDassert(thing);
- /* Set size value */
- if(!dblock->npages)
- *size_ptr = dblock->size;
- else
- *size_ptr = H5EA_DBLOCK_PREFIX_SIZE(dblock);
+ /* Release the extensible array data block */
+ if(H5EA__dblock_dest((H5EA_dblock_t *)thing) < 0)
+ H5E_THROW(H5E_CANTFREE, "can't free extensible array data block")
+
+CATCH
-END_FUNC(STATIC) /* end H5EA__cache_dblock_size() */
+END_FUNC(STATIC) /* end H5EA__cache_dblock_free_icr() */
/*-------------------------------------------------------------------------
- * Function: H5EA__cache_dblock_dest
+ * Function: H5EA__cache_dblock_fsf_size
*
- * Purpose: Destroys an extensible array data block in memory.
+ * Purpose: Tell the metadata cache the actual amount of file space
+ * to free when a dblock entry is destroyed with the free
+ * file space flag set.
*
- * Return: Non-negative on success/Negative on failure
+ * This function is needed when the data block is paged, as
+ * the data block header and all its pages are allocated as a
+ * single contiguous chunk of file space, and must be
+ * deallocated the same way.
*
- * Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Sep 18 2008
+ * The size of that chunk (the data block header plus all
+ * of its pages) is stored in the data block's size field,
+ * so we simply pass that value back to the cache.
+ *
+ * If the data block is not paged, the size field of the
+ * cache_info already holds the correct amount; since it
+ * matches the data block's size field in that case, we
+ * return the contents of the size field to the cache
+ * there as well.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 12/5/14
*
*-------------------------------------------------------------------------
*/
-/* ARGSUSED */
-BEGIN_FUNC(STATIC, ERR,
-herr_t, SUCCEED, FAIL,
-H5EA__cache_dblock_dest(H5F_t *f, H5EA_dblock_t *dblock))
+BEGIN_FUNC(STATIC, NOERR,
+herr_t, SUCCEED, -,
+H5EA__cache_dblock_fsf_size(const void *_thing, size_t *fsf_size))
- /* Sanity check */
- HDassert(f);
+ /* Local variables */
+ const H5EA_dblock_t *dblock = (const H5EA_dblock_t *)_thing; /* Pointer to the object */
+
+ /* Check arguments */
HDassert(dblock);
+ HDassert(dblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(dblock->cache_info.type == H5AC_EARRAY_DBLOCK);
+ HDassert(fsf_size);
- /* Verify that data block is clean */
- HDassert(dblock->cache_info.is_dirty == FALSE);
+ *fsf_size = dblock->size;
- /* If we're going to free the space on disk, the address must be valid */
- HDassert(!dblock->cache_info.free_file_space_on_destroy || H5F_addr_defined(dblock->cache_info.addr));
+END_FUNC(STATIC) /* end H5EA__cache_dblock_fsf_size() */
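For a paged data block the cached image covers only the prefix, while the file space to release covers the prefix plus every page, because the whole block was allocated as one contiguous chunk. The sketch below contrasts the two callbacks on a simplified structure; the toy_dblock fields and example sizes are illustrative, not the real H5EA_dblock_t.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the fields the two callbacks consult */
struct toy_dblock {
    size_t npages;      /* 0 means the block is not paged */
    size_t size;        /* prefix + all pages, as allocated on disk */
    size_t prefix_size; /* header portion only */
};

/* Mirrors H5EA__cache_dblock_image_len(): paged blocks cache only the prefix */
static size_t image_len(const struct toy_dblock *d)
{
    return d->npages ? d->prefix_size : d->size;
}

/* Mirrors H5EA__cache_dblock_fsf_size(): always free the full allocation */
static size_t fsf_size(const struct toy_dblock *d)
{
    return d->size;
}

int main(void)
{
    struct toy_dblock paged = { 4, 4 * 8192 + 26, 26 };

    printf("image_len = %zu, fsf_size = %zu\n",
           image_len(&paged), fsf_size(&paged));
    return 0;
}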
- /* Check for freeing file space for extensible array data block */
- if(dblock->cache_info.free_file_space_on_destroy) {
- /* Sanity check address */
- HDassert(H5F_addr_eq(dblock->addr, dblock->cache_info.addr));
+
+/*-------------------------------------------------------------------------
+ * Function: H5EA__cache_dblk_page_get_load_size
+ *
+ * Purpose: Compute the size of the data structure on disk.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * July 17, 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(STATIC, NOERR,
+herr_t, SUCCEED, -,
+H5EA__cache_dblk_page_get_load_size(const void *_udata, size_t *image_len))
- /* Release the space on disk */
- /* (Includes space for pages!) */
- /* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_EARRAY_DBLOCK, H5AC_dxpl_id, dblock->cache_info.addr, (hsize_t)dblock->size) < 0)
- H5E_THROW(H5E_CANTFREE, "unable to free extensible array data block")
- } /* end if */
+ /* Local variables */
+ const H5EA_dblk_page_cache_ud_t *udata = (const H5EA_dblk_page_cache_ud_t *)_udata; /* User data */
- /* Release the data block */
- if(H5EA__dblock_dest(dblock) < 0)
- H5E_THROW(H5E_CANTFREE, "can't free extensible array data block")
+ /* Check arguments */
+ HDassert(udata);
+ HDassert(udata->hdr);
+ HDassert(udata->parent);
+ HDassert(image_len);
-CATCH
+ *image_len = (size_t)H5EA_DBLK_PAGE_SIZE(udata->hdr);
-END_FUNC(STATIC) /* end H5EA__cache_dblock_dest() */
+END_FUNC(STATIC) /* end H5EA__cache_dblk_page_get_load_size() */
/*-------------------------------------------------------------------------
- * Function: H5EA__cache_dblk_page_load
+ * Function: H5EA__cache_dblk_page_deserialize
*
- * Purpose: Loads an extensible array data block page from the disk.
+ * Purpose: Loads a data structure from the disk.
*
- * Return: Success: Pointer to a new extensible array data block page
+ * Return: Success: Pointer to a new extensible array data block page
* Failure: NULL
*
* Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Nov 20 2008
+ * koziol@hdfgroup.org
+ * July 17, 2013
*
*-------------------------------------------------------------------------
*/
BEGIN_FUNC(STATIC, ERR,
-H5EA_dblk_page_t *, NULL, NULL,
-H5EA__cache_dblk_page_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata))
+void *, NULL, NULL,
+H5EA__cache_dblk_page_deserialize(const void *_image, size_t len,
+ void *_udata, hbool_t H5_ATTR_UNUSED *dirty))
/* Local variables */
H5EA_dblk_page_t *dblk_page = NULL; /* Data block page info */
H5EA_dblk_page_cache_ud_t *udata = (H5EA_dblk_page_cache_ud_t *)_udata; /* User data for loading data block page */
- size_t size; /* Data block page size */
- H5WB_t *wb = NULL; /* Wrapped buffer for data block page data */
- uint8_t dblk_page_buf[H5EA_DBLK_PAGE_BUF_SIZE]; /* Buffer for data block page */
- uint8_t *buf; /* Pointer to data block page buffer */
- const uint8_t *p; /* Pointer into raw data buffer */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
uint32_t stored_chksum; /* Stored metadata checksum value */
uint32_t computed_chksum; /* Computed metadata checksum value */
/* Sanity check */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
- HDassert(udata && udata->hdr && udata->parent);
+ HDassert(udata);
+ HDassert(udata->hdr);
+ HDassert(udata->parent);
+ HDassert(H5F_addr_defined(udata->dblk_page_addr));
/* Allocate the extensible array data block page */
if(NULL == (dblk_page = H5EA__dblk_page_alloc(udata->hdr, udata->parent)))
H5E_THROW(H5E_CANTALLOC, "memory allocation failed for extensible array data block page")
- /* Set the extensible array data block's information */
- dblk_page->addr = addr;
-
- /* Wrap the local buffer for serialized info */
- if(NULL == (wb = H5WB_wrap(dblk_page_buf, sizeof(dblk_page_buf))))
- H5E_THROW(H5E_CANTINIT, "can't wrap buffer")
-
- /* Compute the size of the extensible array data block page on disk */
- size = H5EA_DBLK_PAGE_SIZE(udata->hdr);
-
- /* Get a pointer to a buffer that's large enough for serialized info */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
- H5E_THROW(H5E_CANTGET, "can't get actual buffer")
-
- /* Read data block page from disk */
- if(H5F_block_read(f, H5FD_MEM_EARRAY_DBLK_PAGE, addr, size, dxpl_id, buf) < 0)
- H5E_THROW(H5E_READERROR, "can't read extensible array data block page")
-
- /* Get temporary pointer to serialized header */
- p = buf;
+ /* Set the extensible array data block page's information */
+ dblk_page->addr = udata->dblk_page_addr;
/* Internal information */
/* Decode elements in data block page */
/* Convert from raw elements on disk into native elements in memory */
- if((udata->hdr->cparam.cls->decode)(p, dblk_page->elmts, udata->hdr->dblk_page_nelmts, udata->hdr->cb_ctx) < 0)
+ if((udata->hdr->cparam.cls->decode)(image, dblk_page->elmts, udata->hdr->dblk_page_nelmts, udata->hdr->cb_ctx) < 0)
H5E_THROW(H5E_CANTDECODE, "can't decode extensible array data elements")
- p += (udata->hdr->dblk_page_nelmts * udata->hdr->cparam.raw_elmt_size);
+ image += (udata->hdr->dblk_page_nelmts * udata->hdr->cparam.raw_elmt_size);
/* Sanity check */
/* (allow for checksum not decoded yet) */
- HDassert((size_t)(p - buf) == (size - H5EA_SIZEOF_CHKSUM));
+ HDassert((size_t)(image - (const uint8_t *)_image) == (len - H5EA_SIZEOF_CHKSUM));
/* Set the data block page's size */
- dblk_page->size = size;
+ dblk_page->size = len;
- /* Compute checksum on data block */
- computed_chksum = H5_checksum_metadata(buf, (size_t)(p - buf), 0);
+ /* Compute checksum on data block page */
+ computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
/* Metadata checksum */
- UINT32DECODE(p, stored_chksum);
+ UINT32DECODE(image, stored_chksum);
/* Sanity check */
- HDassert((size_t)(p - buf) == dblk_page->size);
+ HDassert((size_t)(image - (const uint8_t *)_image) == dblk_page->size);
/* Verify checksum */
if(stored_chksum != computed_chksum)
@@ -1836,129 +1733,94 @@ H5EA__cache_dblk_page_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata))
CATCH
/* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- H5E_THROW(H5E_CLOSEERROR, "can't close wrapped buffer")
if(!ret_value)
if(dblk_page && H5EA__dblk_page_dest(dblk_page) < 0)
H5E_THROW(H5E_CANTFREE, "unable to destroy extensible array data block page")
-END_FUNC(STATIC) /* end H5EA__cache_dblk_page_load() */
+END_FUNC(STATIC) /* end H5EA__cache_dblk_page_deserialize() */
/*-------------------------------------------------------------------------
- * Function: H5EA__cache_dblk_page_flush
+ * Function: H5EA__cache_dblk_page_image_len
*
- * Purpose: Flushes a dirty extensible array data block page to disk.
+ * Purpose: Compute the size of the data structure on disk.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Nov 20 2008
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * July 17, 2013
*
*-------------------------------------------------------------------------
*/
-BEGIN_FUNC(STATIC, ERR,
-herr_t, SUCCEED, FAIL,
-H5EA__cache_dblk_page_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
- H5EA_dblk_page_t *dblk_page, unsigned H5_ATTR_UNUSED * flags_ptr))
+BEGIN_FUNC(STATIC, NOERR,
+herr_t, SUCCEED, -,
+H5EA__cache_dblk_page_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr))
/* Local variables */
- H5WB_t *wb = NULL; /* Wrapped buffer for serializing data */
- uint8_t ser_buf[H5EA_DBLK_PAGE_BUF_SIZE]; /* Serialization buffer */
+ const H5EA_dblk_page_t *dblk_page = (const H5EA_dblk_page_t *)_thing; /* Pointer to the object */
- /* Sanity check */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ /* Check arguments */
HDassert(dblk_page);
- HDassert(dblk_page->hdr);
-
- if(dblk_page->cache_info.is_dirty) {
- uint8_t *buf; /* Temporary raw data buffer */
- uint8_t *p; /* Pointer into raw data buffer */
- size_t size; /* Index block size on disk */
- uint32_t metadata_chksum; /* Computed metadata checksum value */
-
- /* Wrap the local buffer for serialized info */
- if(NULL == (wb = H5WB_wrap(ser_buf, sizeof(ser_buf))))
- H5E_THROW(H5E_CANTINIT, "can't wrap buffer")
-
- /* Compute the size of the data block on disk */
- size = dblk_page->size;
-
- /* Get a pointer to a buffer that's large enough for serialized info */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
- H5E_THROW(H5E_CANTGET, "can't get actual buffer")
-
- /* Get temporary pointer to serialized info */
- p = buf;
-
- /* Internal information */
-
- /* Encode elements in data block page */
-
- /* Convert from native elements in memory into raw elements on disk */
- if((dblk_page->hdr->cparam.cls->encode)(p, dblk_page->elmts, dblk_page->hdr->dblk_page_nelmts, dblk_page->hdr->cb_ctx) < 0)
- H5E_THROW(H5E_CANTENCODE, "can't encode extensible array data elements")
- p += (dblk_page->hdr->dblk_page_nelmts * dblk_page->hdr->cparam.raw_elmt_size);
-
- /* Compute metadata checksum */
- metadata_chksum = H5_checksum_metadata(buf, (size_t)(p - buf), 0);
-
- /* Metadata checksum */
- UINT32ENCODE(p, metadata_chksum);
-
- /* Write the data block */
- HDassert((size_t)(p - buf) == size);
- if(H5F_block_write(f, H5FD_MEM_EARRAY_DBLK_PAGE, addr, size, dxpl_id, buf) < 0)
- H5E_THROW(H5E_WRITEERROR, "unable to save extensible array data block page to disk")
-
- dblk_page->cache_info.is_dirty = FALSE;
- } /* end if */
+ HDassert(image_len);
- if(destroy)
- if(H5EA__cache_dblk_page_dest(f, dblk_page) < 0)
- H5E_THROW(H5E_CANTFREE, "unable to destroy extensible array data block page")
-
-CATCH
-
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- H5E_THROW(H5E_CLOSEERROR, "can't close wrapped buffer")
+ /* Set the image length size */
+ *image_len = dblk_page->size;
-END_FUNC(STATIC) /* end H5EA__cache_dblk_page_flush() */
+END_FUNC(STATIC) /* end H5EA__cache_dblk_page_image_len() */
/*-------------------------------------------------------------------------
- * Function: H5EA__cache_dblk_page_clear
+ * Function: H5EA__cache_dblk_page_serialize
*
- * Purpose: Mark a extensible array data block page in memory as non-dirty.
+ * Purpose: Serializes a dirty object into its on-disk image.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Nov 20 2008
+ * koziol@hdfgroup.org
+ * July 17, 2013
*
*-------------------------------------------------------------------------
*/
BEGIN_FUNC(STATIC, ERR,
herr_t, SUCCEED, FAIL,
-H5EA__cache_dblk_page_clear(H5F_t *f, H5EA_dblk_page_t *dblk_page, hbool_t destroy))
+H5EA__cache_dblk_page_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNUSED len,
+ void *_thing))
- /* Sanity check */
+ /* Local variables */
+ H5EA_dblk_page_t *dblk_page = (H5EA_dblk_page_t *)_thing; /* Pointer to the object to serialize */
+ uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t metadata_chksum; /* Computed metadata checksum value */
+
+ /* Check arguments */
+ HDassert(f);
+ HDassert(image);
HDassert(dblk_page);
+ HDassert(dblk_page->hdr);
+
+ /* Internal information */
- /* Reset the dirty flag */
- dblk_page->cache_info.is_dirty = FALSE;
+ /* Encode elements in data block page */
- if(destroy)
- if(H5EA__cache_dblk_page_dest(f, dblk_page) < 0)
- H5E_THROW(H5E_CANTFREE, "unable to destroy extensible array data block page")
+ /* Convert from native elements in memory into raw elements on disk */
+ if((dblk_page->hdr->cparam.cls->encode)(image, dblk_page->elmts, dblk_page->hdr->dblk_page_nelmts, dblk_page->hdr->cb_ctx) < 0)
+ H5E_THROW(H5E_CANTENCODE, "can't encode extensible array data elements")
+ image += (dblk_page->hdr->dblk_page_nelmts * dblk_page->hdr->cparam.raw_elmt_size);
+
+ /* Compute metadata checksum */
+ metadata_chksum = H5_checksum_metadata(_image, (size_t)(image - (uint8_t *)_image), 0);
+
+ /* Metadata checksum */
+ UINT32ENCODE(image, metadata_chksum);
+
+ /* Sanity check */
+ HDassert((size_t)(image - (uint8_t *)_image) <= len);
CATCH
-END_FUNC(STATIC) /* end H5EA__cache_dblk_page_clear() */
+END_FUNC(STATIC) /* end H5EA__cache_dblk_page_serialize() */
/*-------------------------------------------------------------------------
@@ -1976,7 +1838,10 @@ END_FUNC(STATIC) /* end H5EA__cache_dblk_page_clear() */
*/
BEGIN_FUNC(STATIC, ERR,
herr_t, SUCCEED, FAIL,
-H5EA__cache_dblk_page_notify(H5AC_notify_action_t action, H5EA_dblk_page_t *dblk_page))
+H5EA__cache_dblk_page_notify(H5AC_notify_action_t action, void *_thing))
+
+ /* Local variables */
+ H5EA_dblk_page_t *dblk_page = (H5EA_dblk_page_t *)_thing; /* Pointer to the object */
/* Sanity check */
HDassert(dblk_page);
@@ -1984,11 +1849,16 @@ H5EA__cache_dblk_page_notify(H5AC_notify_action_t action, H5EA_dblk_page_t *dblk
/* Determine which action to take */
switch(action) {
case H5AC_NOTIFY_ACTION_AFTER_INSERT:
+ case H5AC_NOTIFY_ACTION_AFTER_LOAD:
/* Create flush dependency on parent */
if(H5EA__create_flush_depend((H5AC_info_t *)dblk_page->parent, (H5AC_info_t *)dblk_page) < 0)
H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency between data block page and parent, address = %llu", (unsigned long long)dblk_page->addr)
break;
+ case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
+ /* do nothing */
+ break;
+
case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
/* Destroy flush dependency on parent */
if(H5EA__destroy_flush_depend((H5AC_info_t *)dblk_page->parent, (H5AC_info_t *)dblk_page) < 0)
@@ -2005,70 +1875,31 @@ END_FUNC(STATIC) /* end H5EA__cache_dblk_page_notify() */
/*-------------------------------------------------------------------------
- * Function: H5EA__cache_dblk_page_size
+ * Function: H5EA__cache_dblk_page_free_icr
*
- * Purpose: Compute the size in bytes of a extensible array data block page
- * on disk, and return it in *size_ptr. On failure,
- * the value of *size_ptr is undefined.
+ * Purpose: Destroy/release an "in core representation" of a data
+ * structure
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Nov 20 2008
+ * koziol@hdfgroup.org
+ * July 17, 2013
*
*-------------------------------------------------------------------------
*/
-/* ARGSUSED */
-BEGIN_FUNC(STATIC, NOERR,
-herr_t, SUCCEED, -,
-H5EA__cache_dblk_page_size(const H5F_t H5_ATTR_UNUSED *f, const H5EA_dblk_page_t *dblk_page,
- size_t *size_ptr))
-
- /* Sanity check */
- HDassert(f);
- HDassert(dblk_page);
- HDassert(size_ptr);
-
- /* Set size value */
- *size_ptr = dblk_page->size;
-
-END_FUNC(STATIC) /* end H5EA__cache_dblk_page_size() */
-
-
-/*-------------------------------------------------------------------------
- * Function: H5EA__cache_dblk_page_dest
- *
- * Purpose: Destroys an extensible array data block page in memory.
- *
- * Note: Does _not_ free the space for the page on disk, that is
- * handled through the data block that "owns" the page.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Nov 20 2008
- *
- *-------------------------------------------------------------------------
- */
-/* ARGSUSED */
BEGIN_FUNC(STATIC, ERR,
herr_t, SUCCEED, FAIL,
-H5EA__cache_dblk_page_dest(H5F_t H5_ATTR_UNUSED *f, H5EA_dblk_page_t *dblk_page))
-
- /* Sanity check */
- HDassert(f);
- HDassert(dblk_page);
+H5EA__cache_dblk_page_free_icr(void *thing))
- /* Verify that data block page is clean */
- HDassert(dblk_page->cache_info.is_dirty == FALSE);
+ /* Check arguments */
+ HDassert(thing);
- /* Release the data block page */
- if(H5EA__dblk_page_dest(dblk_page) < 0)
+ /* Release the extensible array data block page */
+ if(H5EA__dblk_page_dest((H5EA_dblk_page_t *)thing) < 0)
H5E_THROW(H5E_CANTFREE, "can't free extensible array data block page")
CATCH
-END_FUNC(STATIC) /* end H5EA__cache_dblk_page_dest() */
+END_FUNC(STATIC) /* end H5EA__cache_dblk_page_free_icr() */
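Taken together, the H5EAcache.c changes above replace the old load/flush/clear/size/dest callbacks with the get_load_size/deserialize/image_len/serialize/free_icr set for each extensible array entry type. The sketch below models how a cache might drive such a callback table for a trivial entry; the toy_cache_class struct and its driver are illustrative and do not reproduce the real H5AC/H5C client interface.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative callback table; field names echo the new H5EA callbacks
 * but this is not the real H5AC_class_t layout. */
struct toy_cache_class {
    void  (*get_load_size)(const void *udata, size_t *len);
    void *(*deserialize)(const void *image, size_t len, void *udata);
    void  (*image_len)(const void *thing, size_t *len);
    void  (*serialize)(void *image, size_t len, void *thing);
    void  (*free_icr)(void *thing);
};

/* A trivial entry type: a counter persisted as 8 ASCII digits */
static void counter_load_size(const void *udata, size_t *len) { (void)udata; *len = 8; }
static void *counter_deserialize(const void *image, size_t len, void *udata)
{
    char buf[9] = {0};
    long *thing = malloc(sizeof(*thing));
    (void)udata;
    memcpy(buf, image, len);
    *thing = strtol(buf, NULL, 10);
    return thing;
}
static void counter_image_len(const void *thing, size_t *len) { (void)thing; *len = 8; }
static void counter_serialize(void *image, size_t len, void *thing)
{
    char buf[16];
    snprintf(buf, sizeof(buf), "%08ld", *(long *)thing);
    memcpy(image, buf, len);
}
static void counter_free_icr(void *thing) { free(thing); }

static const struct toy_cache_class counter_class = {
    counter_load_size, counter_deserialize, counter_image_len,
    counter_serialize, counter_free_icr
};

int main(void)
{
    const char disk[8] = {'0','0','0','0','0','0','4','2'};  /* "on-disk" image */
    size_t len;
    void *thing;
    char out[8];

    /* Load path: size the read, then deserialize into the in-core form */
    counter_class.get_load_size(NULL, &len);
    thing = counter_class.deserialize(disk, len, NULL);
    *(long *)thing += 1;                                      /* dirty the entry */

    /* Flush path: size the image, serialize, "write", then evict */
    counter_class.image_len(thing, &len);
    counter_class.serialize(out, len, thing);
    printf("flushed image: %.8s\n", out);
    counter_class.free_icr(thing);
    return 0;
}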
diff --git a/src/H5EAdbg.c b/src/H5EAdbg.c
index ff55722..9c3ce6d 100644
--- a/src/H5EAdbg.c
+++ b/src/H5EAdbg.c
@@ -119,7 +119,7 @@ H5EA__hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
} /* end if */
/* Load the extensible array header */
- if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, addr, dbg_ctx, H5AC_READ)))
+ if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, addr, dbg_ctx, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to load extensible array header")
/* Print opening message */
@@ -218,14 +218,14 @@ H5EA__iblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t H5_ATTR_UNUSED addr, FILE *s
} /* end if */
/* Load the extensible array header */
- if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, hdr_addr, dbg_ctx, H5AC_READ)))
+ if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, hdr_addr, dbg_ctx, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to load extensible array header")
/* Sanity check */
HDassert(H5F_addr_eq(hdr->idx_blk_addr, addr));
/* Protect index block */
- if(NULL == (iblock = H5EA__iblock_protect(hdr, dxpl_id, H5AC_READ)))
+ if(NULL == (iblock = H5EA__iblock_protect(hdr, dxpl_id, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array index block, address = %llu", (unsigned long long)hdr->idx_blk_addr)
/* Print opening message */
@@ -343,12 +343,12 @@ H5EA__sblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int inde
} /* end if */
/* Load the extensible array header */
- if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, hdr_addr, dbg_ctx, H5AC_READ)))
+ if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, hdr_addr, dbg_ctx, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to load extensible array header")
/* Protect super block */
/* (Note: setting parent of super block to 'hdr' for this operation should be OK -QAK) */
- if(NULL == (sblock = H5EA__sblock_protect(hdr, dxpl_id, (H5EA_iblock_t *)hdr, addr, sblk_idx, H5AC_READ)))
+ if(NULL == (sblock = H5EA__sblock_protect(hdr, dxpl_id, (H5EA_iblock_t *)hdr, addr, sblk_idx, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array super block, address = %llu", (unsigned long long)addr)
/* Print opening message */
@@ -437,12 +437,12 @@ H5EA__dblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int inde
} /* end if */
/* Load the extensible array header */
- if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, hdr_addr, dbg_ctx, H5AC_READ)))
+ if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, hdr_addr, dbg_ctx, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to load extensible array header")
/* Protect data block */
/* (Note: setting parent of data block to 'hdr' for this operation should be OK -QAK) */
- if(NULL == (dblock = H5EA__dblock_protect(hdr, dxpl_id, hdr, addr, dblk_nelmts, H5AC_READ)))
+ if(NULL == (dblock = H5EA__dblock_protect(hdr, dxpl_id, hdr, addr, dblk_nelmts, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array data block, address = %llu", (unsigned long long)addr)
/* Print opening message */
diff --git a/src/H5EAdblkpage.c b/src/H5EAdblkpage.c
index 37fd68b..5d188c2 100644
--- a/src/H5EAdblkpage.c
+++ b/src/H5EAdblkpage.c
@@ -210,7 +210,7 @@ END_FUNC(PKG) /* end H5EA__dblk_page_create() */
BEGIN_FUNC(PKG, ERR,
H5EA_dblk_page_t *, NULL, NULL,
H5EA__dblk_page_protect(H5EA_hdr_t *hdr, hid_t dxpl_id, H5EA_sblock_t *parent,
- haddr_t dblk_page_addr, H5AC_protect_t rw))
+ haddr_t dblk_page_addr, unsigned flags))
/* Local variables */
H5EA_dblk_page_cache_ud_t udata; /* Information needed for loading data block page */
@@ -223,12 +223,16 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
HDassert(hdr);
HDassert(H5F_addr_defined(dblk_page_addr));
+ /* only the H5AC__READ_ONLY_FLAG may be set */
+ HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
+
/* Set up user data */
udata.hdr = hdr;
udata.parent = parent;
+ udata.dblk_page_addr = dblk_page_addr;
/* Protect the data block page */
- if(NULL == (ret_value = (H5EA_dblk_page_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_DBLK_PAGE, dblk_page_addr, &udata, rw)))
+ if(NULL == (ret_value = (H5EA_dblk_page_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_DBLK_PAGE, dblk_page_addr, &udata, flags)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array data block page, address = %llu", (unsigned long long)dblk_page_addr)
CATCH
diff --git a/src/H5EAdblock.c b/src/H5EAdblock.c
index dd1e7b0..24be87a 100644
--- a/src/H5EAdblock.c
+++ b/src/H5EAdblock.c
@@ -304,7 +304,7 @@ END_FUNC(PKG) /* end H5EA__dblock_sblk_idx() */
BEGIN_FUNC(PKG, ERR,
H5EA_dblock_t *, NULL, NULL,
H5EA__dblock_protect(H5EA_hdr_t *hdr, hid_t dxpl_id, void *parent,
- haddr_t dblk_addr, size_t dblk_nelmts, H5AC_protect_t rw))
+ haddr_t dblk_addr, size_t dblk_nelmts, unsigned flags))
/* Local variables */
H5EA_dblock_cache_ud_t udata; /* Information needed for loading data block */
@@ -318,13 +318,17 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
HDassert(H5F_addr_defined(dblk_addr));
HDassert(dblk_nelmts);
+ /* only the H5AC__READ_ONLY_FLAG may be set */
+ HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
+
/* Set up user data */
udata.hdr = hdr;
udata.parent = parent;
udata.nelmts = dblk_nelmts;
+ udata.dblk_addr = dblk_addr;
/* Protect the data block */
- if(NULL == (ret_value = (H5EA_dblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_DBLOCK, dblk_addr, &udata, rw)))
+ if(NULL == (ret_value = (H5EA_dblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_DBLOCK, dblk_addr, &udata, flags)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array data block, address = %llu", (unsigned long long)dblk_addr)
CATCH
@@ -399,7 +403,7 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
HDassert(dblk_nelmts > 0);
/* Protect data block */
- if(NULL == (dblock = H5EA__dblock_protect(hdr, dxpl_id, parent, dblk_addr, dblk_nelmts, H5AC_WRITE)))
+ if(NULL == (dblock = H5EA__dblock_protect(hdr, dxpl_id, parent, dblk_addr, dblk_nelmts, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array data block, address = %llu", (unsigned long long)dblk_addr)
/* Check if this is a paged data block */
diff --git a/src/H5EAhdr.c b/src/H5EAhdr.c
index 76ea6d8..e30bbac 100644
--- a/src/H5EAhdr.c
+++ b/src/H5EAhdr.c
@@ -627,16 +627,25 @@ END_FUNC(PKG) /* end H5EA__hdr_modified() */
BEGIN_FUNC(PKG, ERR,
H5EA_hdr_t *, NULL, NULL,
H5EA__hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t ea_addr, void *ctx_udata,
- H5AC_protect_t rw))
+ unsigned flags))
/* Local variables */
+ H5EA_hdr_cache_ud_t udata; /* User data for cache callbacks */
/* Sanity check */
HDassert(f);
HDassert(H5F_addr_defined(ea_addr));
+ /* only the H5AC__READ_ONLY_FLAG may appear in flags */
+ HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
+
+ /* Set up user data for cache callbacks */
+ udata.f = f;
+ udata.addr = ea_addr;
+ udata.ctx_udata = ctx_udata;
+
/* Protect the header */
- if(NULL == (ret_value = (H5EA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_EARRAY_HDR, ea_addr, ctx_udata, rw)))
+ if(NULL == (ret_value = (H5EA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_EARRAY_HDR, ea_addr, &udata, flags)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array header, address = %llu", (unsigned long long)ea_addr)
CATCH
diff --git a/src/H5EAiblock.c b/src/H5EAiblock.c
index 3c2894a..364b443 100644
--- a/src/H5EAiblock.c
+++ b/src/H5EAiblock.c
@@ -279,7 +279,7 @@ END_FUNC(PKG) /* end H5EA__iblock_create() */
*/
BEGIN_FUNC(PKG, ERR,
H5EA_iblock_t *, NULL, NULL,
-H5EA__iblock_protect(H5EA_hdr_t *hdr, hid_t dxpl_id, H5AC_protect_t rw))
+H5EA__iblock_protect(H5EA_hdr_t *hdr, hid_t dxpl_id, unsigned flags))
#ifdef QAK
HDfprintf(stderr, "%s: Called\n", FUNC);
@@ -288,8 +288,11 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
/* Sanity check */
HDassert(hdr);
+ /* only the H5AC__READ_ONLY_FLAG may be set */
+ HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
+
/* Protect the index block */
- if(NULL == (ret_value = (H5EA_iblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_IBLOCK, hdr->idx_blk_addr, hdr, rw)))
+ if(NULL == (ret_value = (H5EA_iblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_IBLOCK, hdr->idx_blk_addr, hdr, flags)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array index block, address = %llu", (unsigned long long)hdr->idx_blk_addr)
CATCH
@@ -361,7 +364,7 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
HDassert(H5F_addr_defined(hdr->idx_blk_addr));
/* Protect index block */
- if(NULL == (iblock = H5EA__iblock_protect(hdr, dxpl_id, H5AC_WRITE)))
+ if(NULL == (iblock = H5EA__iblock_protect(hdr, dxpl_id, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array index block, address = %llu", (unsigned long long)hdr->idx_blk_addr)
/* Check for index block having data block pointers */
@@ -436,7 +439,6 @@ H5EA__iblock_dest(H5EA_iblock_t *iblock))
/* Sanity check */
HDassert(iblock);
- HDassert(iblock->rc == 0);
/* Check if shared header field has been initialized */
if(iblock->hdr) {
diff --git a/src/H5EApkg.h b/src/H5EApkg.h
index 5382eac..d328c05 100644
--- a/src/H5EApkg.h
+++ b/src/H5EApkg.h
@@ -229,7 +229,6 @@ typedef struct H5EA_iblock_t {
haddr_t *sblk_addrs; /* Buffer for addresses of super blocks in index block */
/* Internal array information (not stored) */
- size_t rc; /* Reference count of objects using this block */
H5EA_hdr_t *hdr; /* Shared array header info */
haddr_t addr; /* Address of this index block on disk */
size_t size; /* Size of index block on disk */
@@ -251,7 +250,6 @@ typedef struct H5EA_sblock_t {
uint8_t *page_init; /* Bitmap of whether a data block page is initialized */
/* Internal array information (not stored) */
- size_t rc; /* Reference count of objects using this block */
H5EA_hdr_t *hdr; /* Shared array header info */
H5EA_iblock_t *parent; /* Parent object for super block (index block) */
haddr_t addr; /* Address of this index block on disk */
@@ -312,11 +310,19 @@ struct H5EA_t {
/* Metadata cache callback user data types */
+/* Info needed for loading header */
+typedef struct H5EA_hdr_cache_ud_t {
+ H5F_t *f; /* Pointer to file for extensible array */
+ haddr_t addr; /* Address of header on disk */
+ void *ctx_udata; /* User context for class */
+} H5EA_hdr_cache_ud_t;
+
/* Info needed for loading super block */
typedef struct H5EA_sblock_cache_ud_t {
H5EA_hdr_t *hdr; /* Shared extensible array information */
H5EA_iblock_t *parent; /* Pointer to parent object for super block (index block) */
unsigned sblk_idx; /* Index of super block */
+ haddr_t sblk_addr; /* Address of super block */
} H5EA_sblock_cache_ud_t;
/* Info needed for loading data block */
@@ -324,12 +330,14 @@ typedef struct H5EA_dblock_cache_ud_t {
H5EA_hdr_t *hdr; /* Shared extensible array information */
void *parent; /* Pointer to parent object for data block (index or super block) */
size_t nelmts; /* Number of elements in data block */
+ haddr_t dblk_addr; /* Address of data block */
} H5EA_dblock_cache_ud_t;
/* Info needed for loading data block page */
typedef struct H5EA_dblk_page_cache_ud_t {
H5EA_hdr_t *hdr; /* Shared extensible array information */
H5EA_sblock_t *parent; /* Pointer to parent object for data block page (super block) */
+ haddr_t dblk_page_addr; /* Address of data block page */
} H5EA_dblk_page_cache_ud_t;
#ifdef H5EA_TESTING
@@ -388,7 +396,7 @@ H5_DLL herr_t H5EA__hdr_fuse_incr(H5EA_hdr_t *hdr);
H5_DLL size_t H5EA__hdr_fuse_decr(H5EA_hdr_t *hdr);
H5_DLL herr_t H5EA__hdr_modified(H5EA_hdr_t *hdr);
H5_DLL H5EA_hdr_t *H5EA__hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t ea_addr,
- void *ctx_udata, H5AC_protect_t rw);
+ void *ctx_udata, unsigned flags);
H5_DLL herr_t H5EA__hdr_unprotect(H5EA_hdr_t *hdr, hid_t dxpl_id, unsigned cache_flags);
H5_DLL herr_t H5EA__hdr_delete(H5EA_hdr_t *hdr, hid_t dxpl_id);
H5_DLL herr_t H5EA__hdr_dest(H5EA_hdr_t *hdr);
@@ -398,7 +406,7 @@ H5_DLL H5EA_iblock_t *H5EA__iblock_alloc(H5EA_hdr_t *hdr);
H5_DLL haddr_t H5EA__iblock_create(H5EA_hdr_t *hdr, hid_t dxpl_id,
hbool_t *stats_changed);
H5_DLL H5EA_iblock_t *H5EA__iblock_protect(H5EA_hdr_t *hdr, hid_t dxpl_id,
- H5AC_protect_t rw);
+ unsigned flags);
H5_DLL herr_t H5EA__iblock_unprotect(H5EA_iblock_t *iblock, hid_t dxpl_id,
unsigned cache_flags);
H5_DLL herr_t H5EA__iblock_delete(H5EA_hdr_t *hdr, hid_t dxpl_id);
@@ -410,7 +418,8 @@ H5_DLL H5EA_sblock_t *H5EA__sblock_alloc(H5EA_hdr_t *hdr, H5EA_iblock_t *parent,
H5_DLL haddr_t H5EA__sblock_create(H5EA_hdr_t *hdr, hid_t dxpl_id,
H5EA_iblock_t *parent, hbool_t *stats_changed, unsigned sblk_idx);
H5_DLL H5EA_sblock_t *H5EA__sblock_protect(H5EA_hdr_t *hdr, hid_t dxpl_id,
- H5EA_iblock_t *parent, haddr_t sblk_addr, unsigned sblk_idx, H5AC_protect_t rw);
+ H5EA_iblock_t *parent, haddr_t sblk_addr, unsigned sblk_idx,
+ unsigned flags);
H5_DLL herr_t H5EA__sblock_unprotect(H5EA_sblock_t *sblock, hid_t dxpl_id,
unsigned cache_flags);
H5_DLL herr_t H5EA__sblock_delete(H5EA_hdr_t *hdr, hid_t dxpl_id,
@@ -424,7 +433,7 @@ H5_DLL haddr_t H5EA__dblock_create(H5EA_hdr_t *hdr, hid_t dxpl_id, void *parent,
hbool_t *stats_changed, hsize_t dblk_off, size_t nelmts);
H5_DLL unsigned H5EA__dblock_sblk_idx(const H5EA_hdr_t *hdr, hsize_t idx);
H5_DLL H5EA_dblock_t *H5EA__dblock_protect(H5EA_hdr_t *hdr, hid_t dxpl_id,
- void *parent, haddr_t dblk_addr, size_t dblk_nelmts, H5AC_protect_t rw);
+ void *parent, haddr_t dblk_addr, size_t dblk_nelmts, unsigned flags);
H5_DLL herr_t H5EA__dblock_unprotect(H5EA_dblock_t *dblock, hid_t dxpl_id,
unsigned cache_flags);
H5_DLL herr_t H5EA__dblock_delete(H5EA_hdr_t *hdr, hid_t dxpl_id, void *parent,
@@ -436,7 +445,7 @@ H5_DLL H5EA_dblk_page_t *H5EA__dblk_page_alloc(H5EA_hdr_t *hdr, H5EA_sblock_t *p
H5_DLL herr_t H5EA__dblk_page_create(H5EA_hdr_t *hdr, hid_t dxpl_id,
H5EA_sblock_t *parent, haddr_t addr);
H5_DLL H5EA_dblk_page_t *H5EA__dblk_page_protect(H5EA_hdr_t *hdr, hid_t dxpl_id,
- H5EA_sblock_t *parent, haddr_t dblk_page_addr, H5AC_protect_t rw);
+ H5EA_sblock_t *parent, haddr_t dblk_page_addr, unsigned flags);
H5_DLL herr_t H5EA__dblk_page_unprotect(H5EA_dblk_page_t *dblk_page,
hid_t dxpl_id, unsigned cache_flags);
H5_DLL herr_t H5EA__dblk_page_dest(H5EA_dblk_page_t *dblk_page);
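The prototype changes in H5EApkg.h above replace the old H5AC_protect_t read/write argument with an unsigned flags word (H5AC__NO_FLAGS_SET for write access, H5AC__READ_ONLY_FLAG for read-only), and the cache user-data structs now carry the entry's on-disk address so the deserialize callbacks can record it without being handed an addr parameter. A minimal standalone sketch of that calling pattern, using hypothetical MY_*/my_* names rather than the real H5EA/H5AC types:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the H5AC protect flags */
#define MY_AC__NO_FLAGS_SET   0x0000u
#define MY_AC__READ_ONLY_FLAG 0x0100u

/* User data handed to a deserialize callback; like the new
 * H5EA_sblock_cache_ud_t, it carries the entry's disk address. */
typedef struct my_blk_cache_ud_t {
    uint64_t addr;      /* Address of the block on disk */
    unsigned blk_idx;   /* Index of the block */
} my_blk_cache_ud_t;

/* Protect wrapper in the style of H5EA__sblock_protect(): only the
 * read-only flag may be passed through to the cache. */
static int my_blk_protect(uint64_t addr, unsigned idx, unsigned flags)
{
    my_blk_cache_ud_t udata;

    /* Only the read-only flag may be set by callers */
    assert((flags & (unsigned)(~MY_AC__READ_ONLY_FLAG)) == 0);

    /* Set up user data, including the on-disk address */
    udata.addr = addr;
    udata.blk_idx = idx;

    /* A real client would now call H5AC_protect(f, dxpl_id, class, addr,
     * &udata, flags); this sketch just reports what would happen. */
    printf("protect block %u at %llu, %s\n", udata.blk_idx,
           (unsigned long long)udata.addr,
           (flags & MY_AC__READ_ONLY_FLAG) ? "read-only" : "read/write");
    return 0;
}

int main(void)
{
    my_blk_protect(4096, 0, MY_AC__NO_FLAGS_SET);    /* write access */
    my_blk_protect(8192, 1, MY_AC__READ_ONLY_FLAG);  /* read-only access */
    return 0;
}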
diff --git a/src/H5EAsblock.c b/src/H5EAsblock.c
index 1cc8bd9..33f34c9 100644
--- a/src/H5EAsblock.c
+++ b/src/H5EAsblock.c
@@ -276,7 +276,7 @@ END_FUNC(PKG) /* end H5EA__sblock_create() */
BEGIN_FUNC(PKG, ERR,
H5EA_sblock_t *, NULL, NULL,
H5EA__sblock_protect(H5EA_hdr_t *hdr, hid_t dxpl_id, H5EA_iblock_t *parent,
- haddr_t sblk_addr, unsigned sblk_idx, H5AC_protect_t rw))
+ haddr_t sblk_addr, unsigned sblk_idx, unsigned flags))
/* Local variables */
H5EA_sblock_cache_ud_t udata; /* Information needed for loading super block */
@@ -290,13 +290,17 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
HDassert(hdr);
HDassert(H5F_addr_defined(sblk_addr));
+ /* Only the H5AC__READ_ONLY_FLAG may be set */
+ HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
+
/* Set up user data */
udata.hdr = hdr;
udata.parent = parent;
udata.sblk_idx = sblk_idx;
+ udata.sblk_addr = sblk_addr;
/* Protect the super block */
- if(NULL == (ret_value = (H5EA_sblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_SBLOCK, sblk_addr, &udata, rw)))
+ if(NULL == (ret_value = (H5EA_sblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_SBLOCK, sblk_addr, &udata, flags)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array super block, address = %llu", (unsigned long long)sblk_addr)
CATCH
@@ -370,7 +374,7 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
HDassert(H5F_addr_defined(sblk_addr));
/* Protect super block */
- if(NULL == (sblock = H5EA__sblock_protect(hdr, dxpl_id, parent, sblk_addr, sblk_idx, H5AC_WRITE)))
+ if(NULL == (sblock = H5EA__sblock_protect(hdr, dxpl_id, parent, sblk_addr, sblk_idx, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array super block, address = %llu", (unsigned long long)sblk_addr)
/* Iterate over data blocks */
@@ -412,7 +416,6 @@ H5EA__sblock_dest(H5EA_sblock_t *sblock))
/* Sanity check */
HDassert(sblock);
- HDassert(sblock->rc == 0);
#ifdef QAK
HDfprintf(stderr, "%s: sblock->hdr->dblk_page_nelmts = %Zu, sblock->ndblks = %Zu, sblock->dblk_nelmts = %Zu\n", FUNC, sblock->hdr->dblk_page_nelmts, sblock->ndblks, sblock->dblk_nelmts);
#endif /* QAK */
diff --git a/src/H5FA.c b/src/H5FA.c
index 1acb10b..bfb2a00 100644
--- a/src/H5FA.c
+++ b/src/H5FA.c
@@ -141,7 +141,7 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
H5E_THROW(H5E_CANTALLOC, "memory allocation failed for fixed array info")
/* Lock the array header into memory */
- if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, fa_addr, ctx_udata, H5AC_WRITE)))
+ if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, fa_addr, ctx_udata, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTPROTECT, "unable to load fixed array header")
/* Point fixed array wrapper at header and bump its ref count */
@@ -201,7 +201,7 @@ H5FA_open(H5F_t *f, hid_t dxpl_id, haddr_t fa_addr, void *ctx_udata))
#ifdef H5FA_DEBUG
HDfprintf(stderr, "%s: fa_addr = %a\n", FUNC, fa_addr);
#endif /* H5FA_DEBUG */
- if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, fa_addr, ctx_udata, H5AC_READ)))
+ if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, fa_addr, ctx_udata, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to load fixed array header, address = %llu", (unsigned long long)fa_addr)
/* Check for pending array deletion */
@@ -359,7 +359,7 @@ HDfprintf(stderr, "%s: fixed array data block address not defined!\n", FUNC, idx
HDassert(idx < hdr->cparam.nelmts);
/* Protect data block */
- if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, hdr->dblk_addr, H5AC_WRITE)))
+ if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, hdr->dblk_addr, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block, address = %llu", (unsigned long long)hdr->dblk_addr)
/* Check for paging data block */
@@ -400,7 +400,7 @@ HDfprintf(stderr, "%s: fixed array data block address not defined!\n", FUNC, idx
} /* end if */
/* Protect the data block page */
- if(NULL == (dblk_page = H5FA__dblk_page_protect(hdr, dxpl_id, dblk_page_addr, dblk_page_nelmts, H5AC_WRITE)))
+ if(NULL == (dblk_page = H5FA__dblk_page_protect(hdr, dxpl_id, dblk_page_addr, dblk_page_nelmts, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block page, address = %llu", (unsigned long long)dblk_page_addr)
/* Set the element in the data block page */
@@ -467,7 +467,7 @@ HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
else {
/* Get the data block */
HDassert(H5F_addr_defined(hdr->dblk_addr));
- if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, hdr->dblk_addr, H5AC_READ)))
+ if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, hdr->dblk_addr, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block, address = %llu", (unsigned long long)hdr->dblk_addr)
/* Check for paged data block */
@@ -507,7 +507,7 @@ HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
dblk_page_nelmts = dblock->dblk_page_nelmts;
/* Protect the data block page */
- if(NULL == (dblk_page = H5FA__dblk_page_protect(hdr, dxpl_id, dblk_page_addr, dblk_page_nelmts, H5AC_READ)))
+ if(NULL == (dblk_page = H5FA__dblk_page_protect(hdr, dxpl_id, dblk_page_addr, dblk_page_nelmts, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block page, address = %llu", (unsigned long long)dblk_page_addr)
/* Retrieve element from data block */
@@ -592,7 +592,7 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
/* Lock the array header into memory */
/* (OK to pass in NULL for callback context, since we know the header must be in the cache) */
- if(NULL == (hdr = H5FA__hdr_protect(fa->f, dxpl_id, fa_addr, NULL, H5AC_WRITE)))
+ if(NULL == (hdr = H5FA__hdr_protect(fa->f, dxpl_id, fa_addr, NULL, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTLOAD, "unable to load fixed array header")
/* Set the shared array header's file context for this operation */
@@ -655,7 +655,7 @@ H5FA_delete(H5F_t *f, hid_t dxpl_id, haddr_t fa_addr, void *ctx_udata))
#ifdef H5FA_DEBUG
HDfprintf(stderr, "%s: fa_addr = %a\n", FUNC, fa_addr);
#endif /* H5FA_DEBUG */
- if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, fa_addr, ctx_udata, H5AC_WRITE)))
+ if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, fa_addr, ctx_udata, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array header, address = %llu", (unsigned long long)fa_addr)
/* Check for files using shared array header */
diff --git a/src/H5FAcache.c b/src/H5FAcache.c
index 9a1fc57..298eb56 100644
--- a/src/H5FAcache.c
+++ b/src/H5FAcache.c
@@ -55,11 +55,6 @@
#define H5FA_HDR_VERSION 0 /* Header */
#define H5FA_DBLOCK_VERSION 0 /* Data block */
-/* Size of stack buffer for serialization buffers */
-#define H5FA_HDR_BUF_SIZE 512
-#define H5FA_DBLOCK_BUF_SIZE 512
-#define H5FA_DBLK_PAGE_BUF_SIZE 512
-
/******************/
/* Local Typedefs */
@@ -76,23 +71,35 @@
/********************/
/* Metadata cache (H5AC) callbacks */
-static H5FA_hdr_t *H5FA__cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5FA__cache_hdr_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5FA_hdr_t *hdr, unsigned * flags_ptr);
-static herr_t H5FA__cache_hdr_clear(H5F_t *f, H5FA_hdr_t *hdr, hbool_t destroy);
-static herr_t H5FA__cache_hdr_size(const H5F_t *f, const H5FA_hdr_t *hdr, size_t *size_ptr);
-static herr_t H5FA__cache_hdr_dest(H5F_t *f, H5FA_hdr_t *hdr);
-
-static H5FA_dblock_t *H5FA__cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5FA__cache_dblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5FA_dblock_t *dblock, unsigned * flags_ptr);
-static herr_t H5FA__cache_dblock_clear(H5F_t *f, H5FA_dblock_t *dblock, hbool_t destroy);
-static herr_t H5FA__cache_dblock_size(const H5F_t *f, const H5FA_dblock_t *dblock, size_t *size_ptr);
-static herr_t H5FA__cache_dblock_dest(H5F_t *f, H5FA_dblock_t *dblock);
-
-static H5FA_dblk_page_t *H5FA__cache_dblk_page_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5FA__cache_dblk_page_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5FA_dblk_page_t *dblk_page, unsigned * flags_ptr);
-static herr_t H5FA__cache_dblk_page_clear(H5F_t *f, H5FA_dblk_page_t *dblk_page, hbool_t destroy);
-static herr_t H5FA__cache_dblk_page_size(const H5F_t *f, const H5FA_dblk_page_t *dblk_page, size_t *size_ptr);
-static herr_t H5FA__cache_dblk_page_dest(H5F_t *f, H5FA_dblk_page_t *dblk_page);
+static herr_t H5FA__cache_hdr_get_load_size(const void *udata, size_t *image_len);
+static void *H5FA__cache_hdr_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5FA__cache_hdr_image_len(const void *thing, size_t *image_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static herr_t H5FA__cache_hdr_serialize(const H5F_t *f, void *image, size_t len,
+ void *thing);
+static herr_t H5FA__cache_hdr_free_icr(void *thing);
+
+static herr_t H5FA__cache_dblock_get_load_size(const void *udata, size_t *image_len);
+static void *H5FA__cache_dblock_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5FA__cache_dblock_image_len(const void *thing,
+ size_t *image_len, hbool_t *compressed_ptr,
+ size_t *compressed_image_len_ptr);
+static herr_t H5FA__cache_dblock_serialize(const H5F_t *f, void *image, size_t len,
+ void *thing);
+static herr_t H5FA__cache_dblock_free_icr(void *thing);
+static herr_t H5FA__cache_dblock_fsf_size(const void *thing, size_t *fsf_size);
+
+static herr_t H5FA__cache_dblk_page_get_load_size(const void *udata, size_t *image_len);
+static void *H5FA__cache_dblk_page_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5FA__cache_dblk_page_image_len(const void *thing,
+ size_t *image_len, hbool_t *compressed_ptr,
+ size_t *compressed_image_len_ptr);
+static herr_t H5FA__cache_dblk_page_serialize(const H5F_t *f, void *image, size_t len,
+ void *thing);
+static herr_t H5FA__cache_dblk_page_free_icr(void *thing);
/*********************/
@@ -101,36 +108,53 @@ static herr_t H5FA__cache_dblk_page_dest(H5F_t *f, H5FA_dblk_page_t *dblk_page);
/* H5FA header inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_FARRAY_HDR[1] = {{
- H5AC_FARRAY_HDR_ID,
- (H5AC_load_func_t)H5FA__cache_hdr_load,
- (H5AC_flush_func_t)H5FA__cache_hdr_flush,
- (H5AC_dest_func_t)H5FA__cache_hdr_dest,
- (H5AC_clear_func_t)H5FA__cache_hdr_clear,
- (H5AC_notify_func_t)NULL,
- (H5AC_size_func_t)H5FA__cache_hdr_size,
+ H5AC_FARRAY_HDR_ID, /* Metadata client ID */
+ "Fixed-array Header", /* Metadata client name (for debugging) */
+ H5FD_MEM_FARRAY_HDR, /* File space memory type for client */
+ H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
+ H5FA__cache_hdr_get_load_size, /* 'get_load_size' callback */
+ H5FA__cache_hdr_deserialize, /* 'deserialize' callback */
+ H5FA__cache_hdr_image_len, /* 'image_len' callback */
+ NULL, /* 'pre_serialize' callback */
+ H5FA__cache_hdr_serialize, /* 'serialize' callback */
+ NULL, /* 'notify' callback */
+ H5FA__cache_hdr_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
-
/* H5FA data block inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_FARRAY_DBLOCK[1] = {{
- H5AC_FARRAY_DBLOCK_ID,
- (H5AC_load_func_t)H5FA__cache_dblock_load,
- (H5AC_flush_func_t)H5FA__cache_dblock_flush,
- (H5AC_dest_func_t)H5FA__cache_dblock_dest,
- (H5AC_clear_func_t)H5FA__cache_dblock_clear,
- (H5AC_notify_func_t)NULL,
- (H5AC_size_func_t)H5FA__cache_dblock_size,
+ H5AC_FARRAY_DBLOCK_ID, /* Metadata client ID */
+ "Fixed Array Data Block", /* Metadata client name (for debugging) */
+ H5FD_MEM_FARRAY_DBLOCK, /* File space memory type for client */
+ H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
+ H5FA__cache_dblock_get_load_size, /* 'get_load_size' callback */
+ H5FA__cache_dblock_deserialize, /* 'deserialize' callback */
+ H5FA__cache_dblock_image_len, /* 'image_len' callback */
+ NULL, /* 'pre_serialize' callback */
+ H5FA__cache_dblock_serialize, /* 'serialize' callback */
+ NULL, /* 'notify' callback */
+ H5FA__cache_dblock_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ H5FA__cache_dblock_fsf_size, /* 'fsf_size' callback */
}};
/* H5FA data block page inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_FARRAY_DBLK_PAGE[1] = {{
- H5AC_FARRAY_DBLK_PAGE_ID,
- (H5AC_load_func_t)H5FA__cache_dblk_page_load,
- (H5AC_flush_func_t)H5FA__cache_dblk_page_flush,
- (H5AC_dest_func_t)H5FA__cache_dblk_page_dest,
- (H5AC_clear_func_t)H5FA__cache_dblk_page_clear,
- (H5AC_notify_func_t)NULL,
- (H5AC_size_func_t)H5FA__cache_dblk_page_size,
+ H5AC_FARRAY_DBLK_PAGE_ID, /* Metadata client ID */
+ "Fixed Array Data Block Page", /* Metadata client name (for debugging) */
+ H5FD_MEM_FARRAY_DBLK_PAGE, /* File space memory type for client */
+ H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
+ H5FA__cache_dblk_page_get_load_size, /* 'get_load_size' callback */
+ H5FA__cache_dblk_page_deserialize, /* 'deserialize' callback */
+ H5FA__cache_dblk_page_image_len, /* 'image_len' callback */
+ NULL, /* 'pre_serialize' callback */
+ H5FA__cache_dblk_page_serialize, /* 'serialize' callback */
+ NULL, /* 'notify' callback */
+ H5FA__cache_dblk_page_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
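The rewritten class tables above move from the old load/flush/dest/clear/size callback set to the serialize-style callbacks (get_load_size, deserialize, image_len, pre_serialize, serialize, notify, free_icr, clear, fsf_size), and each class now also names the client, its file-space memory type, and its behavior flags. A reduced standalone model of such a table, covering only a subset of the callbacks and using hypothetical my_* types rather than the real H5AC_class_t:

#include <stddef.h>
#include <stdio.h>

/* Simplified model of the new metadata-cache client class: a set of
 * serialize-style callbacks plus identifying information. */
typedef struct my_cache_class_t {
    int         id;          /* Client ID */
    const char *name;        /* Client name (for debugging) */
    unsigned    flags;       /* Behavior flags */
    int   (*get_load_size)(const void *udata, size_t *image_len);
    void *(*deserialize)(const void *image, size_t len, void *udata, int *dirty);
    int   (*image_len)(const void *thing, size_t *image_len);
    int   (*serialize)(void *image, size_t len, void *thing);
    int   (*free_icr)(void *thing);
    int   (*fsf_size)(const void *thing, size_t *fsf_size);  /* Optional */
} my_cache_class_t;

/* Trivial callbacks for a fixed-size client */
static int my_get_load_size(const void *udata, size_t *image_len)
{ (void)udata; *image_len = 64; return 0; }
static int my_image_len(const void *thing, size_t *image_len)
{ (void)thing; *image_len = 64; return 0; }

/* Class table in the same spirit as H5AC_FARRAY_HDR above; optional
 * callbacks are simply left NULL. */
static const my_cache_class_t MY_CLIENT[1] = {{
    1,                  /* Client ID */
    "Example Client",   /* Client name */
    0,                  /* Behavior flags */
    my_get_load_size,   /* 'get_load_size' callback */
    NULL,               /* 'deserialize' callback (omitted in this sketch) */
    my_image_len,       /* 'image_len' callback */
    NULL,               /* 'serialize' callback (omitted in this sketch) */
    NULL,               /* 'free_icr' callback (omitted in this sketch) */
    NULL                /* 'fsf_size' callback */
}};

int main(void)
{
    size_t len = 0;
    MY_CLIENT[0].get_load_size(NULL, &len);
    printf("%s: on-disk image is %zu bytes\n", MY_CLIENT[0].name, len);
    return 0;
}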
@@ -146,88 +170,101 @@ const H5AC_class_t H5AC_FARRAY_DBLK_PAGE[1] = {{
/*-------------------------------------------------------------------------
- * Function: H5FA__cache_hdr_load
+ * Function: H5FA__cache_hdr_get_load_size
+ *
+ * Purpose: Compute the size of the data structure on disk.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * July 31, 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(STATIC, NOERR,
+herr_t, SUCCEED, -,
+H5FA__cache_hdr_get_load_size(const void *_udata, size_t *image_len))
+
+ /* Local variables */
+ const H5FA_hdr_cache_ud_t *udata = (const H5FA_hdr_cache_ud_t *)_udata; /* User data for callback */
+
+ /* Check arguments */
+ HDassert(udata);
+ HDassert(udata->f);
+ HDassert(image_len);
+
+ /* Set the image length size */
+ *image_len = (size_t)H5FA_HEADER_SIZE_FILE(udata->f);
+
+END_FUNC(STATIC) /* end H5FA__cache_hdr_get_load_size() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5FA__cache_hdr_deserialize
*
- * Purpose: Loads a fixed array header from the disk.
+ * Purpose: Loads a data structure from the disk.
*
- * Return: Success: Pointer to a new fixed array
+ * Return: Success: Pointer to a new fixed array header.
* Failure: NULL
*
- * Programmer: Vailin Choi
- * Thursday, April 30, 2009
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * August 12, 2013
*
*-------------------------------------------------------------------------
*/
BEGIN_FUNC(STATIC, ERR,
-H5FA_hdr_t *, NULL, NULL,
-H5FA__cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata))
+void *, NULL, NULL,
+H5FA__cache_hdr_deserialize(const void *_image, size_t len,
+ void *_udata, hbool_t H5_ATTR_UNUSED *dirty))
/* Local variables */
H5FA_cls_id_t id; /* ID of fixed array class, as found in file */
H5FA_hdr_t *hdr = NULL; /* Fixed array info */
- size_t size; /* Header size */
- H5WB_t *wb = NULL; /* Wrapped buffer for header data */
- uint8_t hdr_buf[H5FA_HDR_BUF_SIZE]; /* Buffer for header */
- uint8_t *buf; /* Pointer to header buffer */
- const uint8_t *p; /* Pointer into raw data buffer */
+ H5FA_hdr_cache_ud_t *udata = (H5FA_hdr_cache_ud_t *)_udata;
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
uint32_t stored_chksum; /* Stored metadata checksum value */
uint32_t computed_chksum; /* Computed metadata checksum value */
/* Check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ HDassert(udata);
+ HDassert(udata->f);
+ HDassert(H5F_addr_defined(udata->addr));
/* Allocate space for the fixed array data structure */
- if(NULL == (hdr = H5FA__hdr_alloc(f)))
+ if(NULL == (hdr = H5FA__hdr_alloc(udata->f)))
H5E_THROW(H5E_CANTALLOC, "memory allocation failed for fixed array shared header")
/* Set the fixed array header's address */
- hdr->addr = addr;
-
- /* Wrap the local buffer for serialized info */
- if(NULL == (wb = H5WB_wrap(hdr_buf, sizeof(hdr_buf))))
- H5E_THROW(H5E_CANTINIT, "can't wrap buffer")
-
- /* Compute the 'base' size of the fixed array header on disk */
- size = H5FA_HEADER_SIZE_HDR(hdr);
-
- /* Get a pointer to a buffer that's large enough for serialized header */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
- H5E_THROW(H5E_CANTGET, "can't get actual buffer")
-
- /* Read header from disk */
- if(H5F_block_read(f, H5FD_MEM_FARRAY_HDR, addr, size, dxpl_id, buf) < 0)
- H5E_THROW(H5E_READERROR, "can't read fixed array header")
-
- /* Get temporary pointer to serialized header */
- p = buf;
+ hdr->addr = udata->addr;
/* Magic number */
- if(HDmemcmp(p, H5FA_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ if(HDmemcmp(image, H5FA_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC))
H5E_THROW(H5E_BADVALUE, "wrong fixed array header signature")
- p += H5_SIZEOF_MAGIC;
+ image += H5_SIZEOF_MAGIC;
/* Version */
- if(*p++ != H5FA_HDR_VERSION)
+ if(*image++ != H5FA_HDR_VERSION)
H5E_THROW(H5E_VERSION, "wrong fixed array header version")
/* Fixed array class */
- id = (H5FA_cls_id_t)*p++;
+ id = (H5FA_cls_id_t)*image++;
if(id >= H5FA_NUM_CLS_ID)
H5E_THROW(H5E_BADTYPE, "incorrect fixed array class")
hdr->cparam.cls = H5FA_client_class_g[id];
/* General array creation/configuration information */
- hdr->cparam.raw_elmt_size = *p++; /* Element size in file (in bytes) */
- hdr->cparam.max_dblk_page_nelmts_bits = *p++; /* Log2(Max. # of elements in data block page) -
- i.e. # of bits needed to store max. # of
- elements in data block page. */
+ hdr->cparam.raw_elmt_size = *image++; /* Element size in file (in bytes) */
+ hdr->cparam.max_dblk_page_nelmts_bits = *image++; /* Log2(Max. # of elements in data block page) -
+ i.e. # of bits needed to store max. # of
+ elements in data block page. */
/* Array statistics */
- H5F_DECODE_LENGTH(f, p, hdr->cparam.nelmts); /* Number of elements */
+ H5F_DECODE_LENGTH(udata->f, image, hdr->cparam.nelmts); /* Number of elements */
/* Internal information */
- H5F_addr_decode(f, &p, &hdr->dblk_addr); /* Address of index block */
+ H5F_addr_decode(udata->f, &image, &hdr->dblk_addr); /* Address of index block */
/* Check for data block */
if(H5F_addr_defined(hdr->dblk_addr)) {
@@ -250,26 +287,26 @@ H5FA__cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata))
/* Sanity check */
/* (allow for checksum not decoded yet) */
- HDassert((size_t)(p - buf) == (size - H5FA_SIZEOF_CHKSUM));
+ HDassert((size_t)(image - (const uint8_t *)_image) == (len - H5FA_SIZEOF_CHKSUM));
/* Compute checksum on entire header */
/* (including the filter information, if present) */
- computed_chksum = H5_checksum_metadata(buf, (size_t)(p - buf), 0);
+ computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
/* Metadata checksum */
- UINT32DECODE(p, stored_chksum);
+ UINT32DECODE(image, stored_chksum);
/* Sanity check */
- HDassert((size_t)(p - buf) == size);
+ HDassert((size_t)(image - (const uint8_t *)_image) == len);
/* Verify checksum */
if(stored_chksum != computed_chksum)
H5E_THROW(H5E_BADVALUE, "incorrect metadata checksum for fixed array header")
/* Finish initializing fixed array header */
- if(H5FA__hdr_init(hdr, udata) < 0)
+ if(H5FA__hdr_init(hdr, udata->ctx_udata) < 0)
H5E_THROW(H5E_CANTINIT, "initialization failed for fixed array header")
- HDassert(hdr->size == size);
+ HDassert(hdr->size == len);
/* Set return value */
ret_value = hdr;
@@ -277,326 +314,277 @@ H5FA__cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata))
CATCH
/* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- H5E_THROW(H5E_CLOSEERROR, "can't close wrapped buffer")
if(!ret_value)
if(hdr && H5FA__hdr_dest(hdr) < 0)
H5E_THROW(H5E_CANTFREE, "unable to destroy fixed array header")
-END_FUNC(STATIC) /* end H5FA__cache_hdr_load() */
+END_FUNC(STATIC) /* end H5FA__cache_hdr_deserialize() */
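The deserialize callbacks in this file all follow the same shape: walk the image with a cursor, verify the magic and version, decode the fields, then checksum every byte up to the trailing 4-byte checksum and compare with the stored value. A standalone sketch of that layout, using a trivial stand-in checksum where the real code calls H5_checksum_metadata():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MY_MAGIC      "FAHD"
#define MY_MAGIC_LEN  4
#define MY_CHKSUM_LEN 4

/* Placeholder checksum; the real code uses H5_checksum_metadata() */
static uint32_t my_checksum(const uint8_t *buf, size_t len)
{
    uint32_t sum = 0;
    while (len--)
        sum = (sum << 1) + *buf++;
    return sum;
}

/* Decode an image: magic, version, payload, trailing checksum */
static int my_deserialize(const uint8_t *image, size_t len, uint8_t *version)
{
    const uint8_t *p = image;
    uint32_t stored, computed;

    if (len < MY_MAGIC_LEN + 1 + MY_CHKSUM_LEN)
        return -1;
    if (memcmp(p, MY_MAGIC, MY_MAGIC_LEN) != 0)
        return -1;                    /* wrong signature */
    p += MY_MAGIC_LEN;

    *version = *p++;                  /* version byte */

    /* Checksum covers everything before the trailing 4 bytes */
    computed = my_checksum(image, len - MY_CHKSUM_LEN);

    /* Stored checksum is the last 4 bytes, LSB first (this sketch's
     * own convention, mirroring the encoding in main below) */
    p = image + (len - MY_CHKSUM_LEN);
    stored = (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
             ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);

    return (stored == computed) ? 0 : -1;
}

int main(void)
{
    uint8_t image[9] = { 'F', 'A', 'H', 'D', 0 /* version */, 0, 0, 0, 0 };
    uint32_t chk = my_checksum(image, sizeof(image) - MY_CHKSUM_LEN);
    uint8_t version = 0xFF;

    /* Append the checksum, then round-trip through the decoder */
    image[5] = (uint8_t)(chk & 0xFF);
    image[6] = (uint8_t)((chk >> 8) & 0xFF);
    image[7] = (uint8_t)((chk >> 16) & 0xFF);
    image[8] = (uint8_t)((chk >> 24) & 0xFF);
    printf("deserialize: %s, version %u\n",
           my_deserialize(image, sizeof(image), &version) == 0 ? "ok" : "bad",
           (unsigned)version);
    return 0;
}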
/*-------------------------------------------------------------------------
- * Function: H5FA__cache_hdr_flush
+ * Function: H5FA__cache_hdr_image_len
*
- * Purpose: Flushes a dirty fixed array header to disk.
+ * Purpose: Compute the size of the data structure on disk.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Vailin Choi
- * Thursday, April 30, 2009
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * August 12, 2013
*
*-------------------------------------------------------------------------
*/
-BEGIN_FUNC(STATIC, ERR,
-herr_t, SUCCEED, FAIL,
-H5FA__cache_hdr_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
- H5FA_hdr_t *hdr, unsigned H5_ATTR_UNUSED * flags_ptr))
+BEGIN_FUNC(STATIC, NOERR,
+herr_t, SUCCEED, -,
+H5FA__cache_hdr_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr))
- H5WB_t *wb = NULL; /* Wrapped buffer for header data */
- uint8_t hdr_buf[H5FA_HDR_BUF_SIZE]; /* Buffer for header */
+ /* Local variables */
+ const H5FA_hdr_t *hdr = (const H5FA_hdr_t *)_thing; /* Pointer to the object */
- /* check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ /* Check arguments */
HDassert(hdr);
+ HDassert(image_len);
- if(hdr->cache_info.is_dirty) {
- uint8_t *buf; /* Temporary raw data buffer */
- uint8_t *p; /* Pointer into raw data buffer */
- size_t size; /* Header size on disk */
- uint32_t metadata_chksum; /* Computed metadata checksum value */
-
- /* Wrap the local buffer for serialized header info */
- if(NULL == (wb = H5WB_wrap(hdr_buf, sizeof(hdr_buf))))
- H5E_THROW(H5E_CANTINIT, "can't wrap buffer")
-
- /* Compute the size of the array header on disk */
- size = hdr->size;
-
- /* Get a pointer to a buffer that's large enough for serialized header */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
- H5E_THROW(H5E_CANTGET, "can't get actual buffer")
-
- /* Get temporary pointer to serialized header */
- p = buf;
+ /* Set the image length size */
+ *image_len = hdr->size;
- /* Magic number */
- HDmemcpy(p, H5FA_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC);
- p += H5_SIZEOF_MAGIC;
+END_FUNC(STATIC) /* end H5FA__cache_hdr_image_len() */
- /* Version # */
- *p++ = H5FA_HDR_VERSION;
+
+/*-------------------------------------------------------------------------
+ * Function: H5FA__cache_hdr_serialize
+ *
+ * Purpose: Flushes a dirty object to disk.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * August 12, 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(STATIC, NOERR,
+herr_t, SUCCEED, -,
+H5FA__cache_hdr_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNUSED len,
+ void *_thing))
- /* Fixed array type */
- *p++ = hdr->cparam.cls->id;
+ /* Local variables */
+ H5FA_hdr_t *hdr = (H5FA_hdr_t *)_thing; /* Pointer to the fixed array header */
+ uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t metadata_chksum; /* Computed metadata checksum value */
- /* General array creation/configuration information */
- *p++ = hdr->cparam.raw_elmt_size; /* Element size in file (in bytes) */
- *p++ = hdr->cparam.max_dblk_page_nelmts_bits; /* Log2(Max. # of elements in data block page) - i.e. # of bits needed to store max. # of elements in data block page */
+ /* check arguments */
+ HDassert(f);
+ HDassert(image);
+ HDassert(hdr);
- /* Array statistics */
- H5F_ENCODE_LENGTH(f, p, hdr->stats.nelmts); /* Number of elements for the fixed array */
+ /* Magic number */
+ HDmemcpy(image, H5FA_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC);
+ image += H5_SIZEOF_MAGIC;
- /* Internal information */
- H5F_addr_encode(f, &p, hdr->dblk_addr); /* Address of fixed array data block */
+ /* Version # */
+ *image++ = H5FA_HDR_VERSION;
- /* Compute metadata checksum */
- metadata_chksum = H5_checksum_metadata(buf, (size_t)(p - buf), 0);
+ /* Fixed array type */
+ *image++ = hdr->cparam.cls->id;
- /* Metadata checksum */
- UINT32ENCODE(p, metadata_chksum);
+ /* General array creation/configuration information */
+ *image++ = hdr->cparam.raw_elmt_size; /* Element size in file (in bytes) */
+ *image++ = hdr->cparam.max_dblk_page_nelmts_bits; /* Log2(Max. # of elements in data block page) - i.e. # of bits needed to store max. # of elements in data block page */
- /* Write the array header. */
- HDassert((size_t)(p - buf) == size);
- if(H5F_block_write(f, H5FD_MEM_FARRAY_HDR, addr, size, dxpl_id, buf) < 0)
- H5E_THROW(H5E_WRITEERROR, "unable to save fixed array header to disk")
+ /* Array statistics */
+ H5F_ENCODE_LENGTH(f, image, hdr->stats.nelmts); /* Number of elements for the fixed array */
- hdr->cache_info.is_dirty = FALSE;
- } /* end if */
+ /* Internal information */
+ H5F_addr_encode(f, &image, hdr->dblk_addr); /* Address of fixed array data block */
- if(destroy)
- if(H5FA__cache_hdr_dest(f, hdr) < 0)
- H5E_THROW(H5E_CANTFREE, "unable to destroy fixed array header")
+ /* Compute metadata checksum */
+ metadata_chksum = H5_checksum_metadata(_image, (size_t)(image - (uint8_t *)_image), 0);
-CATCH
+ /* Metadata checksum */
+ UINT32ENCODE(image, metadata_chksum);
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- H5E_THROW(H5E_CLOSEERROR, "can't close wrapped buffer")
+ /* Sanity check */
+ HDassert((size_t)(image - (uint8_t *)_image) <= len);
-END_FUNC(STATIC) /* end H5FA__cache_hdr_flush() */
+END_FUNC(STATIC) /* end H5FA__cache_hdr_serialize() */
/*-------------------------------------------------------------------------
- * Function: H5FA__cache_hdr_clear
+ * Function: H5FA__cache_hdr_free_icr
*
- * Purpose: Mark a fixed array header in memory as non-dirty.
+ * Purpose: Destroy/release an "in core representation" of a data
+ * structure
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Vailin Choi
- * Thursday, April 30, 2009
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * August 12, 2013
*
*-------------------------------------------------------------------------
*/
BEGIN_FUNC(STATIC, ERR,
herr_t, SUCCEED, FAIL,
-H5FA__cache_hdr_clear(H5F_t *f, H5FA_hdr_t *hdr, hbool_t destroy))
-
- /* Sanity check */
- HDassert(hdr);
+H5FA__cache_hdr_free_icr(void *thing))
- /* Reset the dirty flag. */
- hdr->cache_info.is_dirty = FALSE;
+ /* Check arguments */
+ HDassert(thing);
- if(destroy)
- if(H5FA__cache_hdr_dest(f, hdr) < 0)
- H5E_THROW(H5E_CANTFREE, "unable to destroy fixed array header")
+ /* Release the fixed array header */
+ if(H5FA__hdr_dest((H5FA_hdr_t *)thing) < 0)
+ H5E_THROW(H5E_CANTFREE, "can't free fixed array header")
CATCH
-END_FUNC(STATIC) /* end H5FA__cache_hdr_clear() */
+END_FUNC(STATIC) /* end H5FA__cache_hdr_free_icr() */
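Note that free_icr replaces the old dest callback but does strictly less: it tears down only the in-core representation, while the removed H5FA__cache_hdr_dest() also freed the entry's file space when free_file_space_on_destroy was set; per the fsf_size comment later in this file, that file-space bookkeeping now appears to sit with the cache itself. A minimal standalone sketch with hypothetical my_* types:

#include <stdio.h>
#include <stdlib.h>

typedef struct my_hdr_t {
    void  *elmts;   /* In-core element buffer */
    size_t size;    /* On-disk size (file space is not freed here) */
} my_hdr_t;

/* free_icr-style teardown: release memory only, never file space */
static int my_free_icr(void *thing)
{
    my_hdr_t *hdr = (my_hdr_t *)thing;

    free(hdr->elmts);
    free(hdr);
    return 0;
}

int main(void)
{
    my_hdr_t *hdr = (my_hdr_t *)calloc(1, sizeof(*hdr));

    if (!hdr)
        return 1;
    hdr->elmts = calloc(16, sizeof(double));
    hdr->size = 64;
    printf("freeing in-core representation; %zu on-disk bytes untouched\n", hdr->size);
    return my_free_icr(hdr);
}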
/*-------------------------------------------------------------------------
- * Function: H5FA__cache_hdr_size
+ * Function: H5FA__cache_dblock_get_load_size
*
- * Purpose: Compute the size in bytes of a fixed array header
- * on disk, and return it in *size_ptr. On failure,
- * the value of *size_ptr is undefined.
+ * Purpose: Compute the size of the data structure on disk.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Vailin Choi
- * Thursday, April 30, 2009
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * August 12, 2013
*
*-------------------------------------------------------------------------
*/
-/* ARGSUSED */
BEGIN_FUNC(STATIC, NOERR,
herr_t, SUCCEED, -,
-H5FA__cache_hdr_size(const H5F_t H5_ATTR_UNUSED *f, const H5FA_hdr_t *hdr,
- size_t *size_ptr))
-
- /* Sanity check */
- HDassert(f);
- HDassert(hdr);
- HDassert(size_ptr);
-
- /* Set size value */
- *size_ptr = hdr->size;
-
-END_FUNC(STATIC) /* end H5FA__cache_hdr_size() */
+H5FA__cache_dblock_get_load_size(const void *_udata, size_t *image_len))
-
-/*-------------------------------------------------------------------------
- * Function: H5FA__cache_hdr_dest
- *
- * Purpose: Destroys a fixed array header in memory.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Vailin Choi
- * Thursday, April 30, 2009
- *
- *-------------------------------------------------------------------------
- */
-/* ARGSUSED */
-BEGIN_FUNC(STATIC, ERR,
-herr_t, SUCCEED, FAIL,
-H5FA__cache_hdr_dest(H5F_t *f, H5FA_hdr_t *hdr))
+ /* Local variables */
+ const H5FA_dblock_cache_ud_t *udata = (const H5FA_dblock_cache_ud_t *)_udata; /* User data */
+ H5FA_dblock_t dblock; /* Fake data block for computing size */
+ size_t dblk_page_nelmts; /* # of elements per data block page */
/* Check arguments */
- HDassert(f);
- HDassert(hdr);
-
- /* Verify that header is clean */
- HDassert(hdr->cache_info.is_dirty == FALSE);
-
- /* If we're going to free the space on disk, the address must be valid */
- HDassert(!hdr->cache_info.free_file_space_on_destroy || H5F_addr_defined(hdr->cache_info.addr));
-
- /* Check for freeing file space for fixed array header */
- if(hdr->cache_info.free_file_space_on_destroy) {
- /* Sanity check address */
- HDassert(H5F_addr_eq(hdr->addr, hdr->cache_info.addr));
-
- /* Release the space on disk */
- /* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_FARRAY_HDR, H5AC_dxpl_id, hdr->cache_info.addr, (hsize_t)hdr->size) < 0)
- H5E_THROW(H5E_CANTFREE, "unable to free fixed array header")
+ HDassert(udata);
+ HDassert(udata->hdr);
+ HDassert(image_len);
+
+ /* Set up fake data block for computing size on disk */
+ /* (Note: extracted from H5FA__dblock_alloc; only dblock.hdr, dblock.npages
+ * and dblock.dblk_page_init_size are needed by the size macros) */
+ HDmemset(&dblock, 0, sizeof(dblock));
+
+ dblock.hdr = udata->hdr;
+ dblk_page_nelmts = (size_t)1 << udata->hdr->cparam.max_dblk_page_nelmts_bits;
+ if(udata->hdr->cparam.nelmts > dblk_page_nelmts) {
+ dblock.npages = (size_t)(((udata->hdr->cparam.nelmts + dblk_page_nelmts) - 1) / dblk_page_nelmts);
+ dblock.dblk_page_init_size = (dblock.npages + 7) / 8;
} /* end if */
- /* Release the fixed array header */
- if(H5FA__hdr_dest(hdr) < 0)
- H5E_THROW(H5E_CANTFREE, "can't free fixed array header")
-
-CATCH
+ /* Set the image length size */
+ if(!dblock.npages)
+ *image_len = (size_t)H5FA_DBLOCK_SIZE(&dblock);
+ else
+ *image_len = (size_t)H5FA_DBLOCK_PREFIX_SIZE(&dblock);
-END_FUNC(STATIC) /* end H5FA__cache_hdr_dest() */
+END_FUNC(STATIC) /* end H5FA__cache_dblock_get_load_size() */
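The load-size callback above builds a throwaway H5FA_dblock_t just so the size macros can run: the page count is a ceiling division of the element count by the page capacity (2^max_dblk_page_nelmts_bits), and the 'page init' bitmap needs one bit per page. The same arithmetic as a standalone sketch, with invented numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Example creation parameters (invented for illustration) */
    uint64_t nelmts = 1000;                /* Total elements in the fixed array */
    unsigned page_bits = 6;                /* Log2(max elements per data block page) */
    uint64_t page_nelmts = (uint64_t)1 << page_bits;   /* 64 elements per page */

    /* Paged only when the array does not fit in a single page */
    uint64_t npages = 0, bitmap_bytes = 0;
    if (nelmts > page_nelmts) {
        npages = (nelmts + page_nelmts - 1) / page_nelmts;  /* Ceiling division */
        bitmap_bytes = (npages + 7) / 8;                    /* One 'page init' bit per page */
    }

    if (npages)
        printf("paged: %llu pages, %llu-byte page-init bitmap; load only the prefix\n",
               (unsigned long long)npages, (unsigned long long)bitmap_bytes);
    else
        printf("unpaged: load the whole data block, elements included\n");
    return 0;
}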
/*-------------------------------------------------------------------------
- * Function: H5FA__cache_dblock_load
+ * Function: H5FA__cache_dblock_deserialize
*
- * Purpose: Loads a fixed array data block from the disk.
+ * Purpose: Loads a data structure from the disk.
*
- * Return: Success: Pointer to a new fixed array data block
+ * Return: Success: Pointer to a new fixed array data block.
* Failure: NULL
*
- * Programmer: Vailin Choi
- * Thursday, April 30, 2009
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * August 14, 2013
*
*-------------------------------------------------------------------------
*/
BEGIN_FUNC(STATIC, ERR,
-H5FA_dblock_t *, NULL, NULL,
-H5FA__cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata))
+void *, NULL, NULL,
+H5FA__cache_dblock_deserialize(const void *_image, size_t len,
+ void *_udata, hbool_t H5_ATTR_UNUSED *dirty))
/* Local variables */
H5FA_dblock_t *dblock = NULL; /* Data block info */
H5FA_dblock_cache_ud_t *udata = (H5FA_dblock_cache_ud_t *)_udata; /* User data for loading data block */
- size_t size; /* Data block size */
- H5WB_t *wb = NULL; /* Wrapped buffer for data block data */
- uint8_t dblock_buf[H5FA_DBLOCK_BUF_SIZE]; /* Buffer for data block */
- uint8_t *buf; /* Pointer to data block buffer */
- const uint8_t *p; /* Pointer into raw data buffer */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
uint32_t stored_chksum; /* Stored metadata checksum value */
uint32_t computed_chksum; /* Computed metadata checksum value */
haddr_t arr_addr; /* Address of array header in the file */
/* Sanity check */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
- HDassert(udata && udata->hdr);
+ HDassert(udata);
+ HDassert(udata->hdr);
/* Allocate the fixed array data block */
if(NULL == (dblock = H5FA__dblock_alloc(udata->hdr)))
H5E_THROW(H5E_CANTALLOC, "memory allocation failed for fixed array data block")
- /* Set the fixed array data block's information */
- dblock->addr = addr;
-
- /* Wrap the local buffer for serialized info */
- if(NULL == (wb = H5WB_wrap(dblock_buf, sizeof(dblock_buf))))
- H5E_THROW(H5E_CANTINIT, "can't wrap buffer")
-
- /* Compute the size of the fixed array data block on disk */
- if(!dblock->npages)
- size = (size_t)H5FA_DBLOCK_SIZE(dblock);
- else
- size = H5FA_DBLOCK_PREFIX_SIZE(dblock);
-
- /* Get a pointer to a buffer that's large enough for serialized info */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
- H5E_THROW(H5E_CANTGET, "can't get actual buffer")
+ HDassert(((!dblock->npages) && (len == (size_t)H5FA_DBLOCK_SIZE(dblock)))
+ || (len == (size_t)H5FA_DBLOCK_PREFIX_SIZE(dblock)));
- /* Read data block from disk */
- if(H5F_block_read(f, H5FD_MEM_FARRAY_DBLOCK, addr, size, dxpl_id, buf) < 0)
- H5E_THROW(H5E_READERROR, "can't read fixed array data block")
-
- /* Get temporary pointer to serialized header */
- p = buf;
+ /* Set the fixed array data block's information */
+ dblock->addr = udata->dblk_addr;
/* Magic number */
- if(HDmemcmp(p, H5FA_DBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ if(HDmemcmp(image, H5FA_DBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC))
H5E_THROW(H5E_BADVALUE, "wrong fixed array data block signature")
- p += H5_SIZEOF_MAGIC;
+ image += H5_SIZEOF_MAGIC;
/* Version */
- if(*p++ != H5FA_DBLOCK_VERSION)
+ if(*image++ != H5FA_DBLOCK_VERSION)
H5E_THROW(H5E_VERSION, "wrong fixed array data block version")
/* Fixed array type */
- if(*p++ != (uint8_t)udata->hdr->cparam.cls->id)
+ if(*image++ != (uint8_t)udata->hdr->cparam.cls->id)
H5E_THROW(H5E_BADTYPE, "incorrect fixed array class")
/* Address of header for array that owns this block (just for file integrity checks) */
- H5F_addr_decode(f, &p, &arr_addr);
+ H5F_addr_decode(udata->hdr->f, &image, &arr_addr);
if(H5F_addr_ne(arr_addr, udata->hdr->addr))
H5E_THROW(H5E_BADVALUE, "wrong fixed array header address")
/* Page initialization flags */
if(dblock->npages > 0) {
- HDmemcpy(dblock->dblk_page_init, p, dblock->dblk_page_init_size);
- p += dblock->dblk_page_init_size;
+ HDmemcpy(dblock->dblk_page_init, image, dblock->dblk_page_init_size);
+ image += dblock->dblk_page_init_size;
} /* end if */
/* Only decode elements if the data block is not paged */
if(!dblock->npages) {
/* Decode elements in data block */
/* Convert from raw elements on disk into native elements in memory */
- if((udata->hdr->cparam.cls->decode)(p, dblock->elmts, (size_t)udata->hdr->stats.nelmts, udata->hdr->cb_ctx) < 0)
+ if((udata->hdr->cparam.cls->decode)(image, dblock->elmts, (size_t)udata->hdr->cparam.nelmts, udata->hdr->cb_ctx) < 0)
H5E_THROW(H5E_CANTDECODE, "can't decode fixed array data elements")
- p += (udata->hdr->stats.nelmts * udata->hdr->cparam.raw_elmt_size);
+ image += (udata->hdr->cparam.nelmts * udata->hdr->cparam.raw_elmt_size);
} /* end if */
/* Sanity check */
/* (allow for checksum not decoded yet) */
- HDassert((size_t)(p - buf) == (size - H5FA_SIZEOF_CHKSUM));
+ HDassert((size_t)(image - (const uint8_t *)_image) == (len - H5FA_SIZEOF_CHKSUM));
/* Set the data block's size */
dblock->size = H5FA_DBLOCK_SIZE(dblock);
/* Compute checksum on data block */
- computed_chksum = H5_checksum_metadata(buf, (size_t)(p - buf), 0);
+ computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
/* Metadata checksum */
- UINT32DECODE(p, stored_chksum);
+ UINT32DECODE(image, stored_chksum);
/* Sanity check */
- HDassert((size_t)(p - buf) == size);
+ HDassert((size_t)(image - (const uint8_t *)_image) == len);
/* Verify checksum */
if(stored_chksum != computed_chksum)
@@ -608,317 +596,292 @@ H5FA__cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata))
CATCH
/* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- H5E_THROW(H5E_CLOSEERROR, "can't close wrapped buffer")
if(!ret_value)
if(dblock && H5FA__dblock_dest(dblock) < 0)
H5E_THROW(H5E_CANTFREE, "unable to destroy fixed array data block")
-END_FUNC(STATIC) /* end H5FA__cache_dblock_load() */
+END_FUNC(STATIC) /* end H5FA__cache_dblock_deserialize() */
/*-------------------------------------------------------------------------
- * Function: H5FA__cache_dblock_flush
+ * Function: H5FA__cache_dblock_image_len
*
- * Purpose: Flushes a dirty fixed array data block to disk.
+ * Purpose: Compute the size of the data structure on disk.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Vailin Choi
- * Thursday, April 30, 2009
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * August 14, 2013
*
*-------------------------------------------------------------------------
*/
-BEGIN_FUNC(STATIC, ERR,
-herr_t, SUCCEED, FAIL,
-H5FA__cache_dblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
- H5FA_dblock_t *dblock, unsigned H5_ATTR_UNUSED * flags_ptr))
+BEGIN_FUNC(STATIC, NOERR,
+herr_t, SUCCEED, -,
+H5FA__cache_dblock_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr))
/* Local variables */
- H5WB_t *wb = NULL; /* Wrapped buffer for serializing data */
- uint8_t ser_buf[H5FA_DBLOCK_BUF_SIZE]; /* Serialization buffer */
+ const H5FA_dblock_t *dblock = (const H5FA_dblock_t *)_thing; /* Pointer to the object */
- /* Sanity check */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ /* Check arguments */
HDassert(dblock);
- HDassert(dblock->hdr);
-
- if(dblock->cache_info.is_dirty) {
- uint8_t *buf; /* Temporary raw data buffer */
- uint8_t *p; /* Pointer into raw data buffer */
- size_t size; /* Index block size on disk */
- uint32_t metadata_chksum; /* Computed metadata checksum value */
+ HDassert(image_len);
- /* Wrap the local buffer for serialized info */
- if(NULL == (wb = H5WB_wrap(ser_buf, sizeof(ser_buf))))
- H5E_THROW(H5E_CANTINIT, "can't wrap buffer")
-
- /* Compute the size of the data block on disk */
- if(!dblock->npages)
- size = (size_t)dblock->size;
- else
- size = H5FA_DBLOCK_PREFIX_SIZE(dblock);
+ /* Set the image length size */
+ if(!dblock->npages)
+ *image_len = (size_t)dblock->size;
+ else
+ *image_len = H5FA_DBLOCK_PREFIX_SIZE(dblock);
- /* Get a pointer to a buffer that's large enough for serialized info */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
- H5E_THROW(H5E_CANTGET, "can't get actual buffer")
+END_FUNC(STATIC) /* end H5FA__cache_dblock_image_len() */
- /* Get temporary pointer to serialized info */
- p = buf;
+
+/*-------------------------------------------------------------------------
+ * Function: H5FA__cache_dblock_serialize
+ *
+ * Purpose: Flushes a dirty object to disk.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * August 14, 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(STATIC, ERR,
+herr_t, SUCCEED, FAIL,
+H5FA__cache_dblock_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNUSED len,
+ void *_thing))
- /* Magic number */
- HDmemcpy(p, H5FA_DBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC);
- p += H5_SIZEOF_MAGIC;
+ /* Local variables */
+ H5FA_dblock_t *dblock = (H5FA_dblock_t *)_thing; /* Pointer to the object to serialize */
+ uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t metadata_chksum; /* Computed metadata checksum value */
- /* Version # */
- *p++ = H5FA_DBLOCK_VERSION;
+ /* Check arguments */
+ HDassert(f);
+ HDassert(image);
+ HDassert(dblock);
+ HDassert(dblock->hdr);
- /* Fixed array type */
- *p++ = dblock->hdr->cparam.cls->id;
+ /* Magic number */
+ HDmemcpy(image, H5FA_DBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC);
+ image += H5_SIZEOF_MAGIC;
- /* Address of array header for array which owns this block */
- H5F_addr_encode(f, &p, dblock->hdr->addr);
+ /* Version # */
+ *image++ = H5FA_DBLOCK_VERSION;
- /* Page init flags */
- if(dblock->npages > 0) {
- /* Store the 'page init' bitmasks */
- HDmemcpy(p, dblock->dblk_page_init, dblock->dblk_page_init_size);
- p += dblock->dblk_page_init_size;
- } /* end if */
+ /* Fixed array type */
+ *image++ = dblock->hdr->cparam.cls->id;
- /* Only encode elements if the data block is not paged */
- if(!dblock->npages) {
- /* Encode elements in data block */
+ /* Address of array header for array which owns this block */
+ H5F_addr_encode(f, &image, dblock->hdr->addr);
- /* Convert from native elements in memory into raw elements on disk */
- H5_CHECK_OVERFLOW(dblock->hdr->cparam.nelmts, /* From: */hsize_t, /* To: */size_t);
- if((dblock->hdr->cparam.cls->encode)(p, dblock->elmts, (size_t)dblock->hdr->cparam.nelmts, dblock->hdr->cb_ctx) < 0)
- H5E_THROW(H5E_CANTENCODE, "can't encode fixed array data elements")
- p += (dblock->hdr->cparam.nelmts * dblock->hdr->cparam.raw_elmt_size);
- } /* end if */
+ /* Page init flags */
+ if(dblock->npages > 0) {
+ /* Store the 'page init' bitmasks */
+ HDmemcpy(image, dblock->dblk_page_init, dblock->dblk_page_init_size);
+ image += dblock->dblk_page_init_size;
+ } /* end if */
- /* Compute metadata checksum */
- metadata_chksum = H5_checksum_metadata(buf, (size_t)(p - buf), 0);
+ /* Only encode elements if the data block is not paged */
+ if(!dblock->npages) {
+ /* Encode elements in data block */
- /* Metadata checksum */
- UINT32ENCODE(p, metadata_chksum);
+ /* Convert from native elements in memory into raw elements on disk */
+ H5_CHECK_OVERFLOW(dblock->hdr->cparam.nelmts, /* From: */hsize_t, /* To: */size_t);
+ if((dblock->hdr->cparam.cls->encode)(image, dblock->elmts, (size_t)dblock->hdr->cparam.nelmts, dblock->hdr->cb_ctx) < 0)
+ H5E_THROW(H5E_CANTENCODE, "can't encode fixed array data elements")
+ image += (dblock->hdr->cparam.nelmts * dblock->hdr->cparam.raw_elmt_size);
+ } /* end if */
- /* Write the data block */
- HDassert((size_t)(p - buf) == size);
- if(H5F_block_write(f, H5FD_MEM_FARRAY_DBLOCK, addr, size, dxpl_id, buf) < 0)
- H5E_THROW(H5E_WRITEERROR, "unable to save fixed array data block to disk")
+ /* Compute metadata checksum */
+ metadata_chksum = H5_checksum_metadata(_image, (size_t)(image - (uint8_t *)_image), 0);
- dblock->cache_info.is_dirty = FALSE;
- } /* end if */
+ /* Metadata checksum */
+ UINT32ENCODE(image, metadata_chksum);
- if(destroy)
- if(H5FA__cache_dblock_dest(f, dblock) < 0)
- H5E_THROW(H5E_CANTFREE, "unable to destroy fixed array data block")
+ /* Sanity check */
+ HDassert((size_t)(image - (uint8_t *)_image) <= len);
CATCH
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- H5E_THROW(H5E_CLOSEERROR, "can't close wrapped buffer")
-
-END_FUNC(STATIC) /* end H5FA__cache_dblock_flush() */
+END_FUNC(STATIC) /* end H5FA__cache_dblock_serialize() */
/*-------------------------------------------------------------------------
- * Function: H5FA__cache_dblock_clear
+ * Function: H5FA__cache_dblock_free_icr
*
- * Purpose: Mark a fixed array data block in memory as non-dirty.
+ * Purpose: Destroy/release an "in core representation" of a data
+ * structure
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Vailin Choi
- * Thursday, April 30, 2009
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * August 14, 2013
*
*-------------------------------------------------------------------------
*/
BEGIN_FUNC(STATIC, ERR,
herr_t, SUCCEED, FAIL,
-H5FA__cache_dblock_clear(H5F_t *f, H5FA_dblock_t *dblock, hbool_t destroy))
+H5FA__cache_dblock_free_icr(void *_thing))
- /* Sanity check */
- HDassert(dblock);
+ H5FA_dblock_t *dblock = (H5FA_dblock_t *)_thing; /* Pointer to the object */
- /* Reset the dirty flag */
- dblock->cache_info.is_dirty = FALSE;
+ /* Check arguments */
+ HDassert(dblock);
- if(destroy)
- if(H5FA__cache_dblock_dest(f, dblock) < 0)
- H5E_THROW(H5E_CANTFREE, "unable to destroy fixed array data block")
+ /* Release the fixed array data block */
+ if(H5FA__dblock_dest(dblock) < 0)
+ H5E_THROW(H5E_CANTFREE, "can't free fixed array data block")
CATCH
-END_FUNC(STATIC) /* end H5FA__cache_dblock_clear() */
+END_FUNC(STATIC) /* end H5FA__cache_dblock_free_icr() */
/*-------------------------------------------------------------------------
- * Function: H5FA__cache_dblock_size
- *
- * Purpose: Compute the size in bytes of a fixed array data block
- * on disk, and return it in *size_ptr. On failure,
- * the value of *size_ptr is undefined.
- *
+ * Function: H5FA__cache_dblock_fsf_size
+ *
+ * Purpose: Tell the metadata cache the actual amount of file space
+ * to free when a dblock entry is destroyed with the free
+ * file space flag set.
+ *
+ * This function is needed when the data block is paged, as
+ * the data block header and all its pages are allocated as a
+ * single contiguous chunk of file space, and must be
+ * deallocated the same way.
+ *
+ * The size of that chunk (the data block header plus all of
+ * its pages) is stored in the dblock's size field, so we
+ * simply pass that value back to the cache.
+ *
+ * If the data block is not paged, the size field already
+ * matches the on-disk image size, so returning it is correct
+ * in that case as well.
+ *
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Vailin Choi
- * Thursday, April 30, 2009
+ * Programmer: John Mainzer
+ * 12/5/14
*
*-------------------------------------------------------------------------
*/
-/* ARGSUSED */
BEGIN_FUNC(STATIC, NOERR,
herr_t, SUCCEED, -,
-H5FA__cache_dblock_size(const H5F_t H5_ATTR_UNUSED *f, const H5FA_dblock_t *dblock,
- size_t *size_ptr))
+H5FA__cache_dblock_fsf_size(const void *_thing, size_t *fsf_size))
- /* Sanity check */
- HDassert(f);
+ const H5FA_dblock_t *dblock = (const H5FA_dblock_t *)_thing; /* Pointer to the object */
+
+ /* Check arguments */
HDassert(dblock);
- HDassert(size_ptr);
+ HDassert(dblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(dblock->cache_info.type == H5AC_FARRAY_DBLOCK);
+ HDassert(fsf_size);
- /* Set size value */
- if(!dblock->npages)
- *size_ptr = (size_t)dblock->size;
- else
- *size_ptr = H5FA_DBLOCK_PREFIX_SIZE(dblock);
+ *fsf_size = dblock->size;
-END_FUNC(STATIC) /* end H5FA__cache_dblock_size() */
+END_FUNC(STATIC) /* end H5FA__cache_dblock_fsf_size() */
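The asymmetry described in the comment above is worth spelling out: for a paged data block, image_len reports only the prefix (the pages are separate cache entries), while fsf_size reports dblock->size, the single contiguous allocation holding the prefix and every page, so the cache frees it all at once. A toy illustration with invented sizes:

#include <stddef.h>
#include <stdio.h>

typedef struct toy_dblock_t {
    size_t npages;       /* 0 means the block is not paged */
    size_t prefix_size;  /* Size of the data block prefix on disk */
    size_t size;         /* Full allocation: prefix plus all pages */
} toy_dblock_t;

static size_t toy_image_len(const toy_dblock_t *d)
{   /* What the cache reads/writes for this entry */
    return d->npages ? d->prefix_size : d->size;
}

static size_t toy_fsf_size(const toy_dblock_t *d)
{   /* What the cache must free on destroy-with-free-file-space */
    return d->size;
}

int main(void)
{
    toy_dblock_t paged   = { 16, 58, 58 + 16 * 512 };  /* invented sizes */
    toy_dblock_t unpaged = { 0, 0, 230 };

    printf("paged:   image_len=%zu fsf_size=%zu\n", toy_image_len(&paged), toy_fsf_size(&paged));
    printf("unpaged: image_len=%zu fsf_size=%zu\n", toy_image_len(&unpaged), toy_fsf_size(&unpaged));
    return 0;
}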
/*-------------------------------------------------------------------------
- * Function: H5FA__cache_dblock_dest
+ * Function: H5FA__cache_dblk_page_get_load_size
*
- * Purpose: Destroys a fixed array data block in memory.
+ * Purpose: Compute the size of the data structure on disk.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Vailin Choi
- * Thursday, April 30, 2009
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * August 14, 2013
*
*-------------------------------------------------------------------------
*/
-/* ARGSUSED */
-BEGIN_FUNC(STATIC, ERR,
-herr_t, SUCCEED, FAIL,
-H5FA__cache_dblock_dest(H5F_t *f, H5FA_dblock_t *dblock))
-
- /* Sanity check */
- HDassert(f);
- HDassert(dblock);
-
- /* Verify that data block is clean */
- HDassert(dblock->cache_info.is_dirty == FALSE);
-
- /* If we're going to free the space on disk, the address must be valid */
- HDassert(!dblock->cache_info.free_file_space_on_destroy || H5F_addr_defined(dblock->cache_info.addr));
-
- /* Check for freeing file space for fixed array data block */
- if(dblock->cache_info.free_file_space_on_destroy) {
- /* Sanity check address */
- HDassert(H5F_addr_eq(dblock->addr, dblock->cache_info.addr));
+BEGIN_FUNC(STATIC, NOERR,
+herr_t, SUCCEED, -,
+H5FA__cache_dblk_page_get_load_size(const void *_udata, size_t *image_len))
- /* Release the space on disk */
- /* (Includes space for pages!) */
- /* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_FARRAY_DBLOCK, H5AC_dxpl_id, dblock->cache_info.addr, (hsize_t)dblock->size) < 0)
- H5E_THROW(H5E_CANTFREE, "unable to free fixed array data block")
- } /* end if */
+ /* Local variables */
+ const H5FA_dblk_page_cache_ud_t *udata = (const H5FA_dblk_page_cache_ud_t *)_udata; /* User data */
- /* Release the data block */
- if(H5FA__dblock_dest(dblock) < 0)
- H5E_THROW(H5E_CANTFREE, "can't free fixed array data block")
+ /* Check arguments */
+ HDassert(udata);
+ HDassert(udata->hdr);
+ HDassert(udata->nelmts > 0);
+ HDassert(image_len);
-CATCH
+ *image_len = (size_t)H5FA_DBLK_PAGE_SIZE(udata->hdr, udata->nelmts);
-END_FUNC(STATIC) /* end H5FA__cache_dblock_dest() */
+END_FUNC(STATIC) /* end H5FA__cache_dblk_page_get_load_size() */
/*-------------------------------------------------------------------------
- * Function: H5FA__cache_dblk_page_load
+ * Function: H5FA__cache_dblk_page_deserialize
*
- * Purpose: Loads a fixed array data block page from the disk.
+ * Purpose: Loads a data structure from the disk.
*
- * Return: Success: Pointer to a new fixed array data block page
+ * Return: Success: Pointer to a new fixed array data block page.
* Failure: NULL
*
- * Programmer: Vailin Choi
- * Thursday, April 30, 2009
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * August 14, 2013
*
*-------------------------------------------------------------------------
*/
BEGIN_FUNC(STATIC, ERR,
-H5FA_dblk_page_t *, NULL, NULL,
-H5FA__cache_dblk_page_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata))
+void *, NULL, NULL,
+H5FA__cache_dblk_page_deserialize(const void *_image, size_t len,
+ void *_udata, hbool_t H5_ATTR_UNUSED *dirty))
/* Local variables */
H5FA_dblk_page_t *dblk_page = NULL; /* Data block page info */
H5FA_dblk_page_cache_ud_t *udata = (H5FA_dblk_page_cache_ud_t *)_udata; /* User data for loading data block page */
- size_t size; /* Data block page size */
- H5WB_t *wb = NULL; /* Wrapped buffer for data block page data */
- uint8_t dblk_page_buf[H5FA_DBLK_PAGE_BUF_SIZE]; /* Buffer for data block page */
- uint8_t *buf; /* Pointer to data block page buffer */
- const uint8_t *p; /* Pointer into raw data buffer */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
uint32_t stored_chksum; /* Stored metadata checksum value */
uint32_t computed_chksum; /* Computed metadata checksum value */
/* Sanity check */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
- HDassert(udata && udata->hdr && udata->nelmts > 0);
+ HDassert(udata);
+ HDassert(udata->hdr);
+ HDassert(udata->nelmts > 0);
+ HDassert(H5F_addr_defined(udata->dblk_page_addr));
/* Allocate the fixed array data block page */
if(NULL == (dblk_page = H5FA__dblk_page_alloc(udata->hdr, udata->nelmts)))
H5E_THROW(H5E_CANTALLOC, "memory allocation failed for fixed array data block page")
/* Set the fixed array data block's information */
- dblk_page->addr = addr;
-
- /* Wrap the local buffer for serialized info */
- if(NULL == (wb = H5WB_wrap(dblk_page_buf, sizeof(dblk_page_buf))))
- H5E_THROW(H5E_CANTINIT, "can't wrap buffer")
-
- /* Compute the size of the fixed array data block page on disk */
- size = H5FA_DBLK_PAGE_SIZE(udata->hdr, udata->nelmts);
-
- /* Get a pointer to a buffer that's large enough for serialized info */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
- H5E_THROW(H5E_CANTGET, "can't get actual buffer")
-
- /* Read data block page from disk */
- if(H5F_block_read(f, H5FD_MEM_FARRAY_DBLK_PAGE, addr, size, dxpl_id, buf) < 0)
- H5E_THROW(H5E_READERROR, "can't read fixed array data block page")
-
- /* Get temporary pointer to serialized header */
- p = buf;
+ dblk_page->addr = udata->dblk_page_addr;
/* Internal information */
/* Decode elements in data block page */
/* Convert from raw elements on disk into native elements in memory */
- if((udata->hdr->cparam.cls->decode)(p, dblk_page->elmts, udata->nelmts, udata->hdr->cb_ctx) < 0)
+ if((udata->hdr->cparam.cls->decode)(image, dblk_page->elmts, udata->nelmts, udata->hdr->cb_ctx) < 0)
H5E_THROW(H5E_CANTDECODE, "can't decode fixed array data elements")
- p += (udata->nelmts * udata->hdr->cparam.raw_elmt_size);
+ image += (udata->nelmts * udata->hdr->cparam.raw_elmt_size);
/* Sanity check */
/* (allow for checksum not decoded yet) */
- HDassert((size_t)(p - buf) == (size - H5FA_SIZEOF_CHKSUM));
+ HDassert((size_t)(image - (const uint8_t *)_image) == (len - H5FA_SIZEOF_CHKSUM));
/* Set the data block page's size */
- dblk_page->size = size;
+ dblk_page->size = len;
/* Compute checksum on data block */
- computed_chksum = H5_checksum_metadata(buf, (size_t)(p - buf), 0);
+ computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
/* Metadata checksum */
- UINT32DECODE(p, stored_chksum);
+ UINT32DECODE(image, stored_chksum);
/* Sanity check */
- HDassert((size_t)(p - buf) == dblk_page->size);
+ HDassert((size_t)(image - (const uint8_t *)_image) == dblk_page->size);
/* Verify checksum */
if(stored_chksum != computed_chksum)
@@ -930,192 +893,122 @@ H5FA__cache_dblk_page_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata))
CATCH
/* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- H5E_THROW(H5E_CLOSEERROR, "can't close wrapped buffer")
if(!ret_value)
if(dblk_page && H5FA__dblk_page_dest(dblk_page) < 0)
H5E_THROW(H5E_CANTFREE, "unable to destroy fixed array data block page")
-END_FUNC(STATIC) /* end H5FA__cache_dblk_page_load() */
+END_FUNC(STATIC) /* end H5FA__cache_dblk_page_deserialize() */
/*-------------------------------------------------------------------------
- * Function: H5FA__cache_dblk_page_flush
+ * Function: H5FA__cache_dblk_page_image_len
*
- * Purpose: Flushes a dirty fixed array data block page to disk.
+ * Purpose: Compute the size of the data structure on disk.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Vailin Choi
- * Thursday, April 30, 2009
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * August 14, 2013
*
*-------------------------------------------------------------------------
*/
-BEGIN_FUNC(STATIC, ERR,
-herr_t, SUCCEED, FAIL,
-H5FA__cache_dblk_page_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
- H5FA_dblk_page_t *dblk_page, unsigned H5_ATTR_UNUSED * flags_ptr))
+BEGIN_FUNC(STATIC, NOERR,
+herr_t, SUCCEED, -,
+H5FA__cache_dblk_page_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr))
/* Local variables */
- H5WB_t *wb = NULL; /* Wrapped buffer for serializing data */
- uint8_t ser_buf[H5FA_DBLK_PAGE_BUF_SIZE]; /* Serialization buffer */
+ const H5FA_dblk_page_t *dblk_page = (const H5FA_dblk_page_t *)_thing; /* Pointer to the object */
- /* Sanity check */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ /* Check arguments */
HDassert(dblk_page);
- HDassert(dblk_page->hdr);
-
- if(dblk_page->cache_info.is_dirty) {
- uint8_t *buf; /* Temporary raw data buffer */
- uint8_t *p; /* Pointer into raw data buffer */
- size_t size; /* Index block size on disk */
- uint32_t metadata_chksum; /* Computed metadata checksum value */
-
- /* Wrap the local buffer for serialized info */
- if(NULL == (wb = H5WB_wrap(ser_buf, sizeof(ser_buf))))
- H5E_THROW(H5E_CANTINIT, "can't wrap buffer")
-
- /* Compute the size of the data block on disk */
- size = dblk_page->size;
-
- /* Get a pointer to a buffer that's large enough for serialized info */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
- H5E_THROW(H5E_CANTGET, "can't get actual buffer")
-
- /* Get temporary pointer to serialized info */
- p = buf;
-
- /* Internal information */
-
- /* Encode elements in data block page */
-
- /* Convert from native elements in memory into raw elements on disk */
- if((dblk_page->hdr->cparam.cls->encode)(p, dblk_page->elmts, dblk_page->nelmts, dblk_page->hdr->cb_ctx) < 0)
- H5E_THROW(H5E_CANTENCODE, "can't encode fixed array data elements")
- p += (dblk_page->nelmts * dblk_page->hdr->cparam.raw_elmt_size);
-
- /* Compute metadata checksum */
- metadata_chksum = H5_checksum_metadata(buf, (size_t)(p - buf), 0);
-
- /* Metadata checksum */
- UINT32ENCODE(p, metadata_chksum);
-
- /* Write the data block */
- HDassert((size_t)(p - buf) == size);
- if(H5F_block_write(f, H5FD_MEM_FARRAY_DBLK_PAGE, addr, size, dxpl_id, buf) < 0)
- H5E_THROW(H5E_WRITEERROR, "unable to save fixed array data block page to disk")
-
- dblk_page->cache_info.is_dirty = FALSE;
- } /* end if */
-
- if(destroy)
- if(H5FA__cache_dblk_page_dest(f, dblk_page) < 0)
- H5E_THROW(H5E_CANTFREE, "unable to destroy fixed array data block page")
+ HDassert(image_len);
-CATCH
-
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- H5E_THROW(H5E_CLOSEERROR, "can't close wrapped buffer")
+ /* Set the image length size */
+ *image_len = dblk_page->size;
-END_FUNC(STATIC) /* end H5FA__cache_dblk_page_flush() */
+END_FUNC(STATIC) /* end H5FA__cache_dblk_page_image_len() */
/*-------------------------------------------------------------------------
- * Function: H5FA__cache_dblk_page_clear
+ * Function: H5FA__cache_dblk_page_serialize
*
- * Purpose: Mark a fixed array data block page in memory as non-dirty.
+ * Purpose: Flushes a dirty object to disk.
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Vailin Choi
- * Thursday, April 30, 2009
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * August 14, 2013
*
*-------------------------------------------------------------------------
*/
BEGIN_FUNC(STATIC, ERR,
herr_t, SUCCEED, FAIL,
-H5FA__cache_dblk_page_clear(H5F_t *f, H5FA_dblk_page_t *dblk_page, hbool_t destroy))
+H5FA__cache_dblk_page_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNUSED len,
+ void *_thing))
+
+ /* Local variables */
+ H5FA_dblk_page_t *dblk_page = (H5FA_dblk_page_t *)_thing; /* Pointer to the object to serialize */
+ uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t metadata_chksum; /* Computed metadata checksum value */
/* Sanity check */
+ HDassert(f);
+ HDassert(image);
HDassert(dblk_page);
+ HDassert(dblk_page->hdr);
- /* Reset the dirty flag */
- dblk_page->cache_info.is_dirty = FALSE;
+ /* Internal information */
- if(destroy)
- if(H5FA__cache_dblk_page_dest(f, dblk_page) < 0)
- H5E_THROW(H5E_CANTFREE, "unable to destroy fixed array data block page")
+ /* Encode elements in data block page */
-CATCH
+ /* Convert from native elements in memory into raw elements on disk */
+ if((dblk_page->hdr->cparam.cls->encode)(image, dblk_page->elmts, dblk_page->nelmts, dblk_page->hdr->cb_ctx) < 0)
+ H5E_THROW(H5E_CANTENCODE, "can't encode fixed array data elements")
+ image += (dblk_page->nelmts * dblk_page->hdr->cparam.raw_elmt_size);
-END_FUNC(STATIC) /* end H5FA__cache_dblk_page_clear() */
+ /* Compute metadata checksum */
+ metadata_chksum = H5_checksum_metadata(_image, (size_t)(image - (uint8_t *)_image), 0);
-
-/*-------------------------------------------------------------------------
- * Function: H5FA__cache_dblk_page_size
- *
- * Purpose: Compute the size in bytes of a fixed array data block page
- * on disk, and return it in *size_ptr. On failure,
- * the value of *size_ptr is undefined.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Vailin Choi
- * Thursday, April 30, 2009
- *
- *-------------------------------------------------------------------------
- */
-/* ARGSUSED */
-BEGIN_FUNC(STATIC, NOERR,
-herr_t, SUCCEED, -,
-H5FA__cache_dblk_page_size(const H5F_t H5_ATTR_UNUSED *f, const H5FA_dblk_page_t *dblk_page,
- size_t *size_ptr))
+ /* Metadata checksum */
+ UINT32ENCODE(image, metadata_chksum);
/* Sanity check */
- HDassert(f);
- HDassert(dblk_page);
- HDassert(size_ptr);
+ HDassert((size_t)(image - (uint8_t *)_image) <= len);
- /* Set size value */
- *size_ptr = dblk_page->size;
+CATCH
-END_FUNC(STATIC) /* end H5FA__cache_dblk_page_size() */
+END_FUNC(STATIC) /* end H5FA__cache_dblk_page_serialize() */
/*-------------------------------------------------------------------------
- * Function: H5FA__cache_dblk_page_dest
- *
- * Purpose: Destroys a fixed array data block page in memory.
+ * Function: H5FA__cache_dblk_page_free_icr
*
- * Note: Does _not_ free the space for the page on disk, that is
- * handled through the data block that "owns" the page.
+ * Purpose: Destroy/release an "in core representation" of a data
+ * structure
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Vailin Choi
- * Thursday, April 30, 2009
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * August 14, 2013
*
*-------------------------------------------------------------------------
*/
-/* ARGSUSED */
BEGIN_FUNC(STATIC, ERR,
herr_t, SUCCEED, FAIL,
-H5FA__cache_dblk_page_dest(H5F_t H5_ATTR_UNUSED *f, H5FA_dblk_page_t *dblk_page))
+H5FA__cache_dblk_page_free_icr(void *thing))
- /* Sanity check */
- HDassert(f);
- HDassert(dblk_page);
-
- /* Verify that data block page is clean */
- HDassert(dblk_page->cache_info.is_dirty == FALSE);
+ /* Check arguments */
+ HDassert(thing);
- /* Release the data block page */
- if(H5FA__dblk_page_dest(dblk_page) < 0)
+ /* Release the fixed array data block page */
+ if(H5FA__dblk_page_dest((H5FA_dblk_page_t *)thing) < 0)
H5E_THROW(H5E_CANTFREE, "can't free fixed array data block page")
CATCH
-END_FUNC(STATIC) /* end H5FA__cache_dblk_page_dest() */
+END_FUNC(STATIC) /* end H5FA__cache_dblk_page_free_icr() */
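
The hunks above convert the fixed array data block page client from the old load/flush/clear/size/dest callbacks to the version-3 cache's get_load_size/deserialize/image_len/serialize/free_icr set: the cache now performs the file I/O and hands the client an image buffer whose payload is followed by a 4-byte metadata checksum. The following standalone C sketch illustrates only that image-buffer convention; the element codec and toy_checksum() are stand-ins (the library uses H5_checksum_metadata()), and every name here is hypothetical rather than HDF5 code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for H5_checksum_metadata(); the real library uses a lookup3-style hash. */
static uint32_t toy_checksum(const uint8_t *buf, size_t len)
{
    uint32_t sum = 0;
    while (len--)
        sum = (sum * 31u) + *buf++;
    return sum;
}

/* Encode nelmts 16-bit elements into 'image' and append a 4-byte checksum,
 * mirroring the serialize callback's contract: the caller supplies a buffer
 * of exactly 'len' bytes and the callback must fill it completely. */
static void toy_serialize(uint8_t *image, size_t len, const uint16_t *elmts, size_t nelmts)
{
    uint8_t *p = image;

    for (size_t i = 0; i < nelmts; i++) {   /* raw element encoding */
        *p++ = (uint8_t)(elmts[i] & 0xffu);
        *p++ = (uint8_t)(elmts[i] >> 8);
    }

    uint32_t chksum = toy_checksum(image, (size_t)(p - image));
    for (int b = 0; b < 4; b++)             /* checksum trailer, little-endian */
        *p++ = (uint8_t)((chksum >> (8 * b)) & 0xffu);

    assert((size_t)(p - image) == len);     /* image exactly filled */
}

/* Decode the image produced above; returns 0 on success, -1 on checksum
 * mismatch, mirroring the deserialize callback's verification step. */
static int toy_deserialize(const uint8_t *image, size_t len, uint16_t *elmts, size_t nelmts)
{
    const uint8_t *p = image;

    for (size_t i = 0; i < nelmts; i++) {   /* native element decoding */
        elmts[i] = (uint16_t)(p[0] | (p[1] << 8));
        p += 2;
    }

    uint32_t computed = toy_checksum(image, (size_t)(p - image));

    uint32_t stored = 0;
    for (int b = 0; b < 4; b++)             /* read checksum trailer */
        stored |= (uint32_t)*p++ << (8 * b);

    assert((size_t)(p - image) == len);
    return (stored == computed) ? 0 : -1;
}

int main(void)
{
    uint16_t in[4] = { 1, 2, 3, 4 }, out[4];
    uint8_t image[4 * 2 + 4];               /* elements + checksum trailer */

    toy_serialize(image, sizeof(image), in, 4);
    if (toy_deserialize(image, sizeof(image), out, 4) < 0) {
        fprintf(stderr, "checksum mismatch\n");
        return EXIT_FAILURE;
    }
    printf("round trip ok: %d %d %d %d\n", out[0], out[1], out[2], out[3]);
    return EXIT_SUCCESS;
}

The same encode-then-checksum / decode-then-verify layout appears in every deserialize/serialize pair touched by this commit.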
diff --git a/src/H5FAdbg.c b/src/H5FAdbg.c
index d5239d3..6a84fc9 100644
--- a/src/H5FAdbg.c
+++ b/src/H5FAdbg.c
@@ -117,7 +117,7 @@ H5FA__hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
} /* end if */
/* Load the fixed array header */
- if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, addr, dbg_ctx, H5AC_READ)))
+ if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, addr, dbg_ctx, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to load fixed array header")
/* Print opening message */
@@ -198,11 +198,11 @@ H5FA__dblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int inde
} /* end if */
/* Load the fixed array header */
- if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, hdr_addr, dbg_ctx, H5AC_READ)))
+ if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, hdr_addr, dbg_ctx, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to load fixed array header")
/* Protect data block */
- if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, addr, H5AC_READ)))
+ if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, addr, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block, address = %llu", (unsigned long long)addr)
/* Print opening message */
@@ -246,7 +246,7 @@ H5FA__dblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int inde
if(((page_idx + 1) == dblock->npages) && (nelmts_left = hdr->cparam.nelmts % dblock->dblk_page_nelmts))
dblk_page_nelmts = (size_t)nelmts_left;
- if(NULL == (dblk_page = H5FA__dblk_page_protect(hdr, dxpl_id, dblk_page_addr, dblk_page_nelmts, H5AC_READ)))
+ if(NULL == (dblk_page = H5FA__dblk_page_protect(hdr, dxpl_id, dblk_page_addr, dblk_page_nelmts, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block page, address = %llu", (unsigned long long)dblk_page_addr)
HDfprintf(stream, "%*sElements in page %Zu:\n", indent, "", page_idx);
diff --git a/src/H5FAdblkpage.c b/src/H5FAdblkpage.c
index 1f6b706..e1ea3ac 100644
--- a/src/H5FAdblkpage.c
+++ b/src/H5FAdblkpage.c
@@ -207,7 +207,7 @@ END_FUNC(PKG) /* end H5FA__dblk_page_create() */
BEGIN_FUNC(PKG, ERR,
H5FA_dblk_page_t *, NULL, NULL,
H5FA__dblk_page_protect(H5FA_hdr_t *hdr, hid_t dxpl_id, haddr_t dblk_page_addr,
- size_t dblk_page_nelmts, H5AC_protect_t rw))
+ size_t dblk_page_nelmts, unsigned flags))
/* Local variables */
H5FA_dblk_page_cache_ud_t udata; /* Information needed for loading data block page */
@@ -220,12 +220,16 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
HDassert(hdr);
HDassert(H5F_addr_defined(dblk_page_addr));
+ /* only the H5AC__READ_ONLY_FLAG is permitted */
+ HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
+
/* Set up user data */
udata.hdr = hdr;
udata.nelmts = dblk_page_nelmts;
+ udata.dblk_page_addr = dblk_page_addr;
/* Protect the data block page */
- if(NULL == (ret_value = (H5FA_dblk_page_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FARRAY_DBLK_PAGE, dblk_page_addr, &udata, rw)))
+ if(NULL == (ret_value = (H5FA_dblk_page_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FARRAY_DBLK_PAGE, dblk_page_addr, &udata, flags)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block page, address = %llu", (unsigned long long)dblk_page_addr)
CATCH
diff --git a/src/H5FAdblock.c b/src/H5FAdblock.c
index e6e0e74..42dafeb 100644
--- a/src/H5FAdblock.c
+++ b/src/H5FAdblock.c
@@ -263,7 +263,7 @@ END_FUNC(PKG) /* end H5FA__dblock_create() */
BEGIN_FUNC(PKG, ERR,
H5FA_dblock_t *, NULL, NULL,
H5FA__dblock_protect(H5FA_hdr_t *hdr, hid_t dxpl_id, haddr_t dblk_addr,
- H5AC_protect_t rw))
+ unsigned flags))
/* Local variables */
H5FA_dblock_cache_ud_t udata; /* Information needed for loading data block */
@@ -276,11 +276,15 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
HDassert(hdr);
HDassert(H5F_addr_defined(dblk_addr));
+ /* only the H5AC__READ_ONLY_FLAG is permitted */
+ HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
+
/* Set up user data */
udata.hdr = hdr;
+ udata.dblk_addr = dblk_addr;
/* Protect the data block */
- if(NULL == (ret_value = (H5FA_dblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FARRAY_DBLOCK, dblk_addr, &udata, rw)))
+ if(NULL == (ret_value = (H5FA_dblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FARRAY_DBLOCK, dblk_addr, &udata, flags)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block, address = %llu", (unsigned long long)dblk_addr)
CATCH
@@ -350,7 +354,7 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
HDassert(H5F_addr_defined(dblk_addr));
/* Protect data block */
- if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, dblk_addr, H5AC_WRITE)))
+ if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, dblk_addr, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block, address = %llu", (unsigned long long)dblk_addr)
/* Check if data block is paged */
diff --git a/src/H5FAhdr.c b/src/H5FAhdr.c
index 23c20bc..9083a52 100644
--- a/src/H5FAhdr.c
+++ b/src/H5FAhdr.c
@@ -408,16 +408,25 @@ END_FUNC(PKG) /* end H5FA__hdr_modified() */
BEGIN_FUNC(PKG, ERR,
H5FA_hdr_t *, NULL, NULL,
H5FA__hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t fa_addr, void *ctx_udata,
- H5AC_protect_t rw))
+ unsigned flags))
/* Local variables */
+ H5FA_hdr_cache_ud_t udata; /* User data for cache callbacks */
/* Sanity check */
HDassert(f);
HDassert(H5F_addr_defined(fa_addr));
+ /* only the H5AC__READ_ONLY_FLAG is permitted */
+ HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
+
+ /* Set up user data for cache callbacks */
+ udata.f = f;
+ udata.addr = fa_addr;
+ udata.ctx_udata = ctx_udata;
+
/* Protect the header */
- if(NULL == (ret_value = (H5FA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FARRAY_HDR, fa_addr, ctx_udata, rw)))
+ if(NULL == (ret_value = (H5FA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FARRAY_HDR, fa_addr, &udata, flags)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array header, address = %llu", (unsigned long long)fa_addr)
CATCH
diff --git a/src/H5FApkg.h b/src/H5FApkg.h
index e7993a6..bed9c83 100644
--- a/src/H5FApkg.h
+++ b/src/H5FApkg.h
@@ -206,15 +206,24 @@ struct H5FA_t {
/* Metadata cache callback user data types */
+/* Info needed for loading header */
+typedef struct H5FA_hdr_cache_ud_t {
+ H5F_t *f; /* Pointer to file for fixed array */
+ haddr_t addr; /* Address of header on disk */
+ void *ctx_udata; /* User context for class */
+} H5FA_hdr_cache_ud_t;
+
/* Info needed for loading data block */
typedef struct H5FA_dblock_cache_ud_t {
H5FA_hdr_t *hdr; /* Shared fixed array information */
+ haddr_t dblk_addr; /* Address of data block on disk */
} H5FA_dblock_cache_ud_t;
/* Info needed for loading data block page */
typedef struct H5FA_dblk_page_cache_ud_t {
H5FA_hdr_t *hdr; /* Shared fixed array information */
size_t nelmts; /* Number of elements in data block page */
+ haddr_t dblk_page_addr; /* Address of data block page on disk */
} H5FA_dblk_page_cache_ud_t;
@@ -254,7 +263,7 @@ H5_DLL herr_t H5FA__hdr_fuse_incr(H5FA_hdr_t *hdr);
H5_DLL size_t H5FA__hdr_fuse_decr(H5FA_hdr_t *hdr);
H5_DLL herr_t H5FA__hdr_modified(H5FA_hdr_t *hdr);
H5_DLL H5FA_hdr_t *H5FA__hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t fa_addr,
- void *ctx_udata, H5AC_protect_t rw);
+ void *ctx_udata, unsigned flags);
H5_DLL herr_t H5FA__hdr_unprotect(H5FA_hdr_t *hdr, hid_t dxpl_id, unsigned cache_flags);
H5_DLL herr_t H5FA__hdr_delete(H5FA_hdr_t *hdr, hid_t dxpl_id);
H5_DLL herr_t H5FA__hdr_dest(H5FA_hdr_t *hdr);
@@ -264,7 +273,7 @@ H5_DLL H5FA_dblock_t *H5FA__dblock_alloc(H5FA_hdr_t *hdr);
H5_DLL haddr_t H5FA__dblock_create(H5FA_hdr_t *hdr, hid_t dxpl_id, hbool_t *hdr_dirty);
H5_DLL unsigned H5FA__dblock_sblk_idx(const H5FA_hdr_t *hdr, hsize_t idx);
H5_DLL H5FA_dblock_t *H5FA__dblock_protect(H5FA_hdr_t *hdr, hid_t dxpl_id,
- haddr_t dblk_addr, H5AC_protect_t rw);
+ haddr_t dblk_addr, unsigned flags);
H5_DLL herr_t H5FA__dblock_unprotect(H5FA_dblock_t *dblock, hid_t dxpl_id,
unsigned cache_flags);
H5_DLL herr_t H5FA__dblock_delete(H5FA_hdr_t *hdr, hid_t dxpl_id,
@@ -276,7 +285,7 @@ H5_DLL herr_t H5FA__dblk_page_create(H5FA_hdr_t *hdr, hid_t dxpl_id,
haddr_t addr, size_t nelmts);
H5_DLL H5FA_dblk_page_t *H5FA__dblk_page_alloc(H5FA_hdr_t *hdr, size_t nelmts);
H5_DLL H5FA_dblk_page_t *H5FA__dblk_page_protect(H5FA_hdr_t *hdr, hid_t dxpl_id,
- haddr_t dblk_page_addr, size_t dblk_page_nelmts, H5AC_protect_t rw);
+ haddr_t dblk_page_addr, size_t dblk_page_nelmts, unsigned flags);
H5_DLL herr_t H5FA__dblk_page_unprotect(H5FA_dblk_page_t *dblk_page,
hid_t dxpl_id, unsigned cache_flags);
H5_DLL herr_t H5FA__dblk_page_dest(H5FA_dblk_page_t *dblk_page);
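
The H5FApkg.h changes above replace the H5AC_protect_t read/write enum in the protect prototypes with an unsigned flags bitfield in which only H5AC__READ_ONLY_FLAG may be set (callers pass H5AC__NO_FLAGS_SET for read/write access). A minimal standalone sketch of that flag-checking pattern follows; the MY_* macros are hypothetical stand-ins with illustrative values, not the library's definitions.

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-ins for the H5AC flag macros; values are illustrative. */
#define MY_NO_FLAGS_SET      0x0000u
#define MY_READ_ONLY_FLAG    0x0004u

/* Model of a protect-style routine that accepts a flags bitfield but only
 * honours the read-only bit, as the H5FA__*_protect() routines now do. */
static void toy_protect(unsigned flags)
{
    /* only the read-only flag is permitted */
    assert((flags & (unsigned)(~MY_READ_ONLY_FLAG)) == 0);

    if (flags & MY_READ_ONLY_FLAG)
        printf("protected read-only\n");
    else
        printf("protected read/write\n");
}

int main(void)
{
    toy_protect(MY_READ_ONLY_FLAG);   /* was H5AC_READ under the old API  */
    toy_protect(MY_NO_FLAGS_SET);     /* was H5AC_WRITE under the old API */
    return 0;
}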
diff --git a/src/H5FDfamily.c b/src/H5FDfamily.c
index d993ad0..9edbc72 100644
--- a/src/H5FDfamily.c
+++ b/src/H5FDfamily.c
@@ -905,7 +905,7 @@ H5FD_family_query(const H5FD_t * _file, unsigned long *flags /* out */)
/* Check for flags that are set by h5repart */
if(file && file->repart_members)
- *flags |= H5FD_FEAT_DIRTY_SBLK_LOAD; /* Mark the superblock dirty when it is loaded (so the family member sizes are rewritten) */
+ *flags |= H5FD_FEAT_DIRTY_DRVRINFO_LOAD; /* Mark the superblock dirty when it is loaded (so the family member sizes are rewritten) */
} /* end if */
FUNC_LEAVE_NOAPI(SUCCEED)
diff --git a/src/H5FDpublic.h b/src/H5FDpublic.h
index df5d5cf..58066cc 100644
--- a/src/H5FDpublic.h
+++ b/src/H5FDpublic.h
@@ -198,12 +198,12 @@ typedef enum H5F_mem_t H5FD_mem_t;
*/
#define H5FD_FEAT_IGNORE_DRVRINFO 0x00000020
/*
- * Defining the H5FD_FEAT_DIRTY_SBLK_LOAD for a VFL driver means that
- * the library will mark the superblock dirty when the file is opened
+ * Defining the H5FD_FEAT_DIRTY_DRVRINFO_LOAD for a VFL driver means that
+ * the library will mark the driver info dirty when the file is opened
* R/W. This will cause the driver info to be re-encoded when the file
* is flushed/closed.
*/
-#define H5FD_FEAT_DIRTY_SBLK_LOAD 0x00000040
+#define H5FD_FEAT_DIRTY_DRVRINFO_LOAD 0x00000040
/*
* Defining the H5FD_FEAT_POSIX_COMPAT_HANDLE for a VFL driver means that
* the handle for the VFD (returned with the 'get_handle' callback) is
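
The H5FDpublic.h hunk above renames the feature bit to H5FD_FEAT_DIRTY_DRVRINFO_LOAD (still 0x00000040) so that it marks the driver info, rather than the whole superblock, dirty on open. The sketch below models a query-style callback OR-ing that bit into its out-parameter, patterned after the H5FD_family_query() signature shown earlier; the toy_family_t struct and function names are hypothetical.

#include <stdio.h>

/* Feature bit as defined in H5FDpublic.h (value taken from the diff above). */
#define H5FD_FEAT_DIRTY_DRVRINFO_LOAD 0x00000040

/* Hypothetical stand-in for the family driver's file struct. */
typedef struct {
    int repart_members;   /* set by h5repart when member sizes changed */
} toy_family_t;

/* Sketch of a query-style callback: OR the supported feature bits into
 * *flags, adding DIRTY_DRVRINFO_LOAD only for repartitioned families. */
static int toy_family_query(const toy_family_t *file, unsigned long *flags)
{
    if (flags) {
        *flags = 0;
        if (file && file->repart_members)
            *flags |= H5FD_FEAT_DIRTY_DRVRINFO_LOAD;
    }
    return 0;
}

int main(void)
{
    toy_family_t file = { 1 };
    unsigned long flags = 0;

    toy_family_query(&file, &flags);
    printf("driver info marked dirty on load: %s\n",
           (flags & H5FD_FEAT_DIRTY_DRVRINFO_LOAD) ? "yes" : "no");
    return 0;
}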
diff --git a/src/H5FS.c b/src/H5FS.c
index 42ea070..182ac05 100644
--- a/src/H5FS.c
+++ b/src/H5FS.c
@@ -216,7 +216,7 @@ HDfprintf(stderr, "%s: Opening free space manager, fs_addr = %a, nclasses = %Zu\
cache_udata.addr = fs_addr;
/* Protect the free space header */
- if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fs_addr, &cache_udata, H5AC_READ)))
+ if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fs_addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTPROTECT, NULL, "unable to load free space header")
#ifdef H5FS_DEBUG
HDfprintf(stderr, "%s: fspace->sect_addr = %a\n", FUNC, fspace->sect_addr);
@@ -329,7 +329,7 @@ HDfprintf(stderr, "%s: Deleting free space manager, fs_addr = %a\n", FUNC, fs_ad
#endif /* H5FS_DEBUG */
/* Protect the free space header */
- if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fs_addr, &cache_udata, H5AC_WRITE)))
+ if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fs_addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTPROTECT, FAIL, "unable to protect free space header")
/* Sanity check */
@@ -361,8 +361,19 @@ HDfprintf(stderr, "%s: Expunging free space section info from cache\n", FUNC);
#endif /* H5FS_DEBUG */
/* Evict the free space section info from the metadata cache */
/* (Free file space) */
- if(H5AC_expunge_entry(f, dxpl_id, H5AC_FSPACE_SINFO, fspace->sect_addr, H5AC__FREE_FILE_SPACE_FLAG) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTREMOVE, FAIL, "unable to remove free space section info from cache")
+ {
+ unsigned cache_flags = H5AC__NO_FLAGS_SET;
+
+ /* if the section info is in real file space, tell
+ * the cache to free its file space.
+ */
+ if (!H5F_IS_TMP_ADDR(f, fspace->sect_addr))
+ cache_flags |= H5AC__FREE_FILE_SPACE_FLAG;
+
+ if(H5AC_expunge_entry(f, dxpl_id, H5AC_FSPACE_SINFO, fspace->sect_addr, cache_flags) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTREMOVE, FAIL, "unable to remove free space section info from cache")
+ }
+
#ifdef H5FS_DEBUG
HDfprintf(stderr, "%s: Done expunging free space section info from cache\n", FUNC);
#endif /* H5FS_DEBUG */
@@ -938,7 +949,7 @@ H5FS_free(H5F_t *f, H5FS_t *fspace, hid_t dxpl_id)
cache_udata.f = f;
cache_udata.dxpl_id = dxpl_id;
cache_udata.fspace = fspace;
- if(NULL == (fspace->sinfo = (H5FS_sinfo_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_SINFO, fspace->sect_addr, &cache_udata, H5AC_READ)))
+ if(NULL == (fspace->sinfo = (H5FS_sinfo_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_SINFO, fspace->sect_addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTPROTECT, FAIL, "unable to protect free space section info")
/* Unload and release ownership of the free-space manager section info */
@@ -979,7 +990,7 @@ H5FS_free(H5F_t *f, H5FS_t *fspace, hid_t dxpl_id)
cache_udata.nclasses = 0;
cache_udata.classes = NULL;
cache_udata.cls_init_udata = NULL;
- if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fspace->addr, &cache_udata, H5AC_READ)))
+ if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fspace->addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTPROTECT, FAIL, "unable to protect free space section info")
/* Unpin the free-space manager header */
diff --git a/src/H5FScache.c b/src/H5FScache.c
index 461af99..25a9c5e 100644
--- a/src/H5FScache.c
+++ b/src/H5FScache.c
@@ -49,9 +49,6 @@
#define H5FS_HDR_VERSION 0 /* Header */
#define H5FS_SINFO_VERSION 0 /* Serialized sections */
-/* Size of stack buffer for serialized headers */
-#define H5FS_HDR_BUF_SIZE 256
-
/******************/
/* Local Typedefs */
@@ -60,7 +57,7 @@
/* User data for skip list iterator callback for iterating over section size nodes when syncing */
typedef struct {
H5FS_sinfo_t *sinfo; /* Free space section info */
- uint8_t **p; /* Pointer to address of buffer pointer to serialize with */
+ uint8_t **image; /* Pointer to address of buffer pointer to serialize with */
unsigned sect_cnt_size; /* # of bytes to encode section size counts in */
} H5FS_iter_ud_t;
@@ -75,20 +72,35 @@ typedef struct {
/********************/
/* Section info routines */
-static herr_t H5FS_sinfo_serialize_sect_cb(void *_item, void H5_ATTR_UNUSED *key, void *_udata);
-static herr_t H5FS_sinfo_serialize_node_cb(void *_item, void H5_ATTR_UNUSED *key, void *_udata);
+static herr_t H5FS__sinfo_serialize_sect_cb(void *_item, void H5_ATTR_UNUSED *key, void *_udata);
+static herr_t H5FS__sinfo_serialize_node_cb(void *_item, void H5_ATTR_UNUSED *key, void *_udata);
/* Metadata cache callbacks */
-static H5FS_t *H5FS_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5FS_cache_hdr_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5FS_t *fspace, unsigned H5_ATTR_UNUSED * flags_ptr);
-static herr_t H5FS_cache_hdr_dest(H5F_t *f, H5FS_t *fspace);
-static herr_t H5FS_cache_hdr_clear(H5F_t *f, H5FS_t *fspace, hbool_t destroy);
-static herr_t H5FS_cache_hdr_size(const H5F_t *f, const H5FS_t *fspace, size_t *size_ptr);
-static H5FS_sinfo_t *H5FS_cache_sinfo_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5FS_cache_sinfo_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5FS_sinfo_t *sinfo, unsigned H5_ATTR_UNUSED * flags_ptr);
-static herr_t H5FS_cache_sinfo_dest(H5F_t *f, H5FS_sinfo_t *sinfo);
-static herr_t H5FS_cache_sinfo_clear(H5F_t *f, H5FS_sinfo_t *sinfo, hbool_t destroy);
-static herr_t H5FS_cache_sinfo_size(const H5F_t *f, const H5FS_sinfo_t *sinfo, size_t *size_ptr);
+static herr_t H5FS__cache_hdr_get_load_size(const void *udata, size_t *image_len);
+static void *H5FS__cache_hdr_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5FS__cache_hdr_image_len(const void *thing, size_t *image_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static herr_t H5FS__cache_hdr_pre_serialize(const H5F_t *f, hid_t dxpl_id,
+ void *thing, haddr_t addr, size_t len, size_t compressed_len,
+ haddr_t *new_addr, size_t *new_len, size_t *new_compressed_len,
+ unsigned *flags);
+static herr_t H5FS__cache_hdr_serialize(const H5F_t *f, void *image,
+ size_t len, void *thing);
+static herr_t H5FS__cache_hdr_free_icr(void *thing);
+
+static herr_t H5FS__cache_sinfo_get_load_size(const void *udata, size_t *image_len);
+static void *H5FS__cache_sinfo_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5FS__cache_sinfo_image_len(const void *thing, size_t *image_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static herr_t H5FS__cache_sinfo_pre_serialize(const H5F_t *f, hid_t dxpl_id,
+ void *thing, haddr_t addr, size_t len, size_t compressed_len,
+ haddr_t *new_addr, size_t *new_len, size_t *new_compressed_len,
+ unsigned *flags);
+static herr_t H5FS__cache_sinfo_serialize(const H5F_t *f, void *image,
+ size_t len, void *thing);
+static herr_t H5FS__cache_sinfo_free_icr(void *thing);
/*********************/
@@ -97,24 +109,36 @@ static herr_t H5FS_cache_sinfo_size(const H5F_t *f, const H5FS_sinfo_t *sinfo, s
/* H5FS header inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_FSPACE_HDR[1] = {{
- H5AC_FSPACE_HDR_ID,
- (H5AC_load_func_t)H5FS_cache_hdr_load,
- (H5AC_flush_func_t)H5FS_cache_hdr_flush,
- (H5AC_dest_func_t)H5FS_cache_hdr_dest,
- (H5AC_clear_func_t)H5FS_cache_hdr_clear,
- (H5AC_notify_func_t)NULL,
- (H5AC_size_func_t)H5FS_cache_hdr_size,
+ H5AC_FSPACE_HDR_ID, /* Metadata client ID */
+ "Free Space Header", /* Metadata client name (for debugging) */
+ H5FD_MEM_FSPACE_HDR, /* File space memory type for client */
+ H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
+ H5FS__cache_hdr_get_load_size, /* 'get_load_size' callback */
+ H5FS__cache_hdr_deserialize, /* 'deserialize' callback */
+ H5FS__cache_hdr_image_len, /* 'image_len' callback */
+ H5FS__cache_hdr_pre_serialize, /* 'pre_serialize' callback */
+ H5FS__cache_hdr_serialize, /* 'serialize' callback */
+ NULL, /* 'notify' callback */
+ H5FS__cache_hdr_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
-/* H5FS serialized sections inherit cache-like properties from H5AC */
+/* H5FS section info inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_FSPACE_SINFO[1] = {{
- H5AC_FSPACE_SINFO_ID,
- (H5AC_load_func_t)H5FS_cache_sinfo_load,
- (H5AC_flush_func_t)H5FS_cache_sinfo_flush,
- (H5AC_dest_func_t)H5FS_cache_sinfo_dest,
- (H5AC_clear_func_t)H5FS_cache_sinfo_clear,
- (H5AC_notify_func_t)NULL,
- (H5AC_size_func_t)H5FS_cache_sinfo_size,
+ H5AC_FSPACE_SINFO_ID, /* Metadata client ID */
+ "Free Space Section Info", /* Metadata client name (for debugging) */
+ H5FD_MEM_FSPACE_SINFO, /* File space memory type for client */
+ H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
+ H5FS__cache_sinfo_get_load_size, /* 'get_load_size' callback */
+ H5FS__cache_sinfo_deserialize, /* 'deserialize' callback */
+ H5FS__cache_sinfo_image_len, /* 'image_len' callback */
+ H5FS__cache_sinfo_pre_serialize, /* 'pre_serialize' callback */
+ H5FS__cache_sinfo_serialize, /* 'serialize' callback */
+ NULL, /* 'notify' callback */
+ H5FS__cache_sinfo_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
@@ -127,44 +151,73 @@ const H5AC_class_t H5AC_FSPACE_SINFO[1] = {{
/* Local Variables */
/*******************/
-/* Declare a free list to manage free space section data to/from disk */
-H5FL_BLK_DEFINE_STATIC(sect_block);
-
/*-------------------------------------------------------------------------
- * Function: H5FS_cache_hdr_load
+ * Function: H5FS__cache_hdr_get_load_size
*
- * Purpose: Loads a free space manager header from the disk.
+ * Purpose: Compute the size of the data structure on disk.
*
- * Return: Success: Pointer to a new free space header
- * Failure: NULL
+ * Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * May 2 2006
+ * koziol@hdfgroup.org
+ * August 14, 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FS__cache_hdr_get_load_size(const void *_udata, size_t *image_len)
+{
+ const H5FS_hdr_cache_ud_t *udata = (const H5FS_hdr_cache_ud_t *)_udata; /* User-data for metadata cache callback */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check arguments */
+ HDassert(image_len);
+
+ /* Set the image length size */
+ *image_len = (size_t)H5FS_HEADER_SIZE(udata->f);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5FS__cache_hdr_get_load_size() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5FS__cache_hdr_deserialize
+ *
+ * Purpose: Given a buffer containing the on disk image of the free space
+ * manager header, allocate an instance of H5FS_t, load
+ * it with the data contained in the image, and return a pointer
+ * to the new instance.
+ *
+ * Return: Success: Pointer to new object
+ * Failure: NULL
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * August 18 2013
*
*-------------------------------------------------------------------------
*/
-static H5FS_t *
-H5FS_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
+static void *
+H5FS__cache_hdr_deserialize(const void *_image, size_t len, void *_udata,
+ hbool_t H5_ATTR_UNUSED *dirty)
{
H5FS_t *fspace = NULL; /* Free space header info */
- H5FS_hdr_cache_ud_t *udata = (H5FS_hdr_cache_ud_t *)_udata; /* user data for callback */
- H5WB_t *wb = NULL; /* Wrapped buffer for header data */
- uint8_t hdr_buf[H5FS_HDR_BUF_SIZE]; /* Buffer for header */
- uint8_t *hdr; /* Pointer to header buffer */
- const uint8_t *p; /* Pointer into raw data buffer */
+ H5FS_hdr_cache_ud_t *udata = (H5FS_hdr_cache_ud_t *)_udata; /* User data for callback */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
uint32_t stored_chksum; /* Stored metadata checksum value */
uint32_t computed_chksum; /* Computed metadata checksum value */
unsigned nclasses; /* Number of section classes */
H5FS_t *ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Check arguments */
- HDassert(f);
+ HDassert(image);
HDassert(udata);
+ HDassert(udata->f);
/* Allocate a new free space manager */
if(NULL == (fspace = H5FS__new(udata->f, udata->nclasses, udata->classes, udata->cls_init_udata)))
@@ -173,80 +226,69 @@ H5FS_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Set free space manager's internal information */
fspace->addr = udata->addr;
- /* Wrap the local buffer for serialized header info */
- if(NULL == (wb = H5WB_wrap(hdr_buf, sizeof(hdr_buf))))
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTINIT, NULL, "can't wrap buffer")
-
- /* Get a pointer to a buffer that's large enough for header */
- if(NULL == (hdr = (uint8_t *)H5WB_actual(wb, fspace->hdr_size)))
- HGOTO_ERROR(H5E_FSPACE, H5E_NOSPACE, NULL, "can't get actual buffer")
-
- /* Read header from disk */
- if(H5F_block_read(f, H5FD_MEM_FSPACE_HDR, addr, fspace->hdr_size, dxpl_id, hdr) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_READERROR, NULL, "can't read free space header")
-
- p = hdr;
-
/* Magic number */
- if(HDmemcmp(p, H5FS_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ if(HDmemcmp(image, H5FS_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTLOAD, NULL, "wrong free space header signature")
- p += H5_SIZEOF_MAGIC;
+ image += H5_SIZEOF_MAGIC;
/* Version */
- if(*p++ != H5FS_HDR_VERSION)
+ if(*image++ != H5FS_HDR_VERSION)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTLOAD, NULL, "wrong free space header version")
/* Client ID */
- fspace->client = (H5FS_client_t)*p++;
+ fspace->client = (H5FS_client_t)*image++;
if(fspace->client >= H5FS_NUM_CLIENT_ID)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTLOAD, NULL, "unknown client ID in free space header")
/* Total space tracked */
- H5F_DECODE_LENGTH(udata->f, p, fspace->tot_space);
+ H5F_DECODE_LENGTH(udata->f, image, fspace->tot_space);
/* Total # of free space sections tracked */
- H5F_DECODE_LENGTH(udata->f, p, fspace->tot_sect_count);
+ H5F_DECODE_LENGTH(udata->f, image, fspace->tot_sect_count);
/* # of serializable free space sections tracked */
- H5F_DECODE_LENGTH(udata->f, p, fspace->serial_sect_count);
+ H5F_DECODE_LENGTH(udata->f, image, fspace->serial_sect_count);
/* # of ghost free space sections tracked */
- H5F_DECODE_LENGTH(udata->f, p, fspace->ghost_sect_count);
+ H5F_DECODE_LENGTH(udata->f, image, fspace->ghost_sect_count);
/* # of section classes */
/* (only check if we actually have some classes) */
- UINT16DECODE(p, nclasses);
+ UINT16DECODE(image, nclasses);
if(fspace->nclasses > 0 && fspace->nclasses != nclasses)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTLOAD, NULL, "section class count mismatch")
/* Shrink percent */
- UINT16DECODE(p, fspace->shrink_percent);
+ UINT16DECODE(image, fspace->shrink_percent);
/* Expand percent */
- UINT16DECODE(p, fspace->expand_percent);
+ UINT16DECODE(image, fspace->expand_percent);
- /* Size of address space free space sections are within (log2 of actual value) */
- UINT16DECODE(p, fspace->max_sect_addr);
+ /* Size of address space free space sections are within
+ * (log2 of actual value)
+ */
+ UINT16DECODE(image, fspace->max_sect_addr);
/* Max. size of section to track */
- H5F_DECODE_LENGTH(udata->f, p, fspace->max_sect_size);
+ H5F_DECODE_LENGTH(udata->f, image, fspace->max_sect_size);
/* Address of serialized free space sections */
- H5F_addr_decode(udata->f, &p, &fspace->sect_addr);
+ H5F_addr_decode(udata->f, &image, &fspace->sect_addr);
/* Size of serialized free space sections */
- H5F_DECODE_LENGTH(udata->f, p, fspace->sect_size);
+ H5F_DECODE_LENGTH(udata->f, image, fspace->sect_size);
/* Allocated size of serialized free space sections */
- H5F_DECODE_LENGTH(udata->f, p, fspace->alloc_sect_size);
+ H5F_DECODE_LENGTH(udata->f, image, fspace->alloc_sect_size);
/* Compute checksum on indirect block */
- computed_chksum = H5_checksum_metadata(hdr, (size_t)(p - (const uint8_t *)hdr), 0);
+ computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
/* Metadata checksum */
- UINT32DECODE(p, stored_chksum);
+ UINT32DECODE(image, stored_chksum);
- HDassert((size_t)(p - (const uint8_t *)hdr) == fspace->hdr_size);
+ /* Sanity check */
+ HDassert((size_t)(image - (const uint8_t *)_image) <= len);
/* Verify checksum */
if(stored_chksum != computed_chksum)
@@ -257,177 +299,432 @@ H5FS_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
done:
/* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- HDONE_ERROR(H5E_FSPACE, H5E_CLOSEERROR, NULL, "can't close wrapped buffer")
if(!ret_value && fspace)
if(H5FS__hdr_dest(fspace) < 0)
HDONE_ERROR(H5E_FSPACE, H5E_CANTFREE, NULL, "unable to destroy free space header")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5FS_cache_hdr_load() */ /*lint !e818 Can't make udata a pointer to const */
+} /* end H5FS__cache_hdr_deserialize() */
/*-------------------------------------------------------------------------
- * Function: H5FS_cache_hdr_flush
+ * Function: H5FS__cache_hdr_image_len
*
- * Purpose: Flushes a dirty free space header to disk.
+ * Purpose: Compute the size of the data structure on disk and return
+ * it in *image_len.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * May 2 2006
+ * koziol@hdfgroup.org
+ * August 14, 2013
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5FS_cache_hdr_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5FS_t *fspace, unsigned H5_ATTR_UNUSED * flags_ptr)
+H5FS__cache_hdr_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- H5WB_t *wb = NULL; /* Wrapped buffer for header data */
- uint8_t hdr_buf[H5FS_HDR_BUF_SIZE]; /* Buffer for header */
- herr_t ret_value = SUCCEED; /* Return value */
+ const H5FS_t *fspace = (const H5FS_t *)_thing; /* Pointer to the object */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check arguments */
+ HDassert(fspace);
+ HDassert(fspace->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(fspace->cache_info.type == H5AC_FSPACE_HDR);
+ HDassert(image_len);
+
+ /* Set the image length size */
+ *image_len = fspace->hdr_size;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5FS__cache_hdr_image_len() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5FS__cache_hdr_pre_serialize
+ *
+ * Purpose: The free space manager header contains the address, size, and
+ * allocation size of the free space manager section info. However,
+ * since it is possible for the section info to either not be allocated
+ * at all, or be allocated in temporary (AKA imaginary) file space,
+ * it is possible for the above-mentioned fields to contain gibberish
+ * when the free space manager header is serialized.
+ *
+ * This function exists to prevent this problem. It does so by
+ * forcing allocation of real file space for the section information.
+ *
+ * Note that in the Version 2 cache, this problem was dealt with by
+ * simply flushing the section info before flushing the header. This
+ * was possible, since the clients handled file I/O directly. As
+ * this responsibility has moved to the cache in Version 3, this
+ * solution is no longer directly applicable.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 6/21/14
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FS__cache_hdr_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *_thing,
+ haddr_t addr, size_t H5_ATTR_UNUSED len, size_t H5_ATTR_UNUSED compressed_len,
+ haddr_t *new_addr, size_t *new_len, size_t H5_ATTR_UNUSED *new_compressed_len,
+ unsigned *flags)
+{
+ H5FS_t *fspace = (H5FS_t *)_thing; /* Pointer to the object */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
- /* check arguments */
+ /* Sanity check */
HDassert(f);
- HDassert(H5F_addr_defined(addr));
HDassert(fspace);
- HDassert(H5F_addr_defined(fspace->addr));
+ HDassert(fspace->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(fspace->cache_info.type == H5AC_FSPACE_HDR);
+ HDassert(H5F_addr_defined(addr));
+ HDassert(new_addr);
+ HDassert(new_len);
+ HDassert(flags);
- /* Check if the header "owns" the section info */
if(fspace->sinfo) {
- /* Sanity check - should not be trying to destroy header if it still
- * "owns" section info
+ /* This implies that the header "owns" the section info.
+ *
+ * Unfortunately, the comments in the code are not clear as to
+ * what this means, but from reviewing the code (most particularly
+ * H5FS_close(), H5FS_sinfo_lock(), and H5FS_sinfo_unlock()), I
+ * gather that it means that the header is maintaining a pointer to
+ * an instance of H5FS_sinfo_t in which free space data is
+ * maintained, and either:
+ *
+ * 1) The instance of H5FS_sinfo_t is not in the metadata cache.
+ *
+ * This will be TRUE iff H5F_addr_defined(fspace->sect_addr)
+ * is FALSE, and fspace->sinfo is not NULL. This is sometimes
+ * referred to as "floating" section info in the comments.
+ *
+ * If the section info structure contains free space data
+ * that must be placed on disk eventually, then
+ *
+ * fspace->serial_sect_count > 0
+ *
+ * and
+ *
+ * H5F_addr_defined(fspace->addr)
+ *
+ * will both be TRUE. If this condition does not hold, then
+ * either the free space info is not persistent
+ * (!H5F_addr_defined(fspace->addr)???) or the section info
+ * contains no free space data that must be written to file
+ * ( fspace->serial_sect_count == 0 ).
+ *
+ * 2) The instance of H5FS_sinfo_t is in the metadata cache with
+ * address in temporary file space (AKA imaginary file space).
+ * The entry may or may not be protected, and if protected, it
+ * may be protected either RW or RO (as indicated by
+ * fspace->sinfo_protected and fspace->sinfo_accmod).
+ *
+ * 3) The instance of H5FS_sinfo_t is in the metadata cache with
+ * address in real file space. As in case 2) above, the entry
+ * may or may not be protected, and if protected, it
+ * may be protected either RW or RO (as indicated by
+ * fspace->sinfo_protected and fspace->sinfo_accmod).
+ *
+ * Observe that fspace->serial_sect_count > 0 must be TRUE in
+ * cases 2) and 3), as the section info should not be stored on
+ * disk if it doesn't exist. Similarly, since the section info
+ * will not be stored to disk unless the header is,
+ * H5F_addr_defined(fspace->addr) must hold as well.
+ *
+ * As the objective is to touch up the free space manager header
+ * so that it contains sensible data on the size and location of
+ * the section information, we have to handle each of the above
+ * cases differently.
+ *
+ * Case 1) If either fspace->serial_sect_count == 0 or
+ * ! H5F_addr_defined(fspace->addr) do nothing as either
+ * the free space manager data is not persistent, or the
+ * section info is empty.
+ *
+ * Otherwise, allocate space for the section info in real
+ * file space, insert the section info at this location, and
+ * set fspace->sect_addr, fspace->sect_size, and
+ * fspace->alloc_sect_size to reflect the new location
+ * of the section info. Note that it is not necessary to
+ * force a write of the section info.
+ *
+ * Case 2) Allocate space for the section info in real file space,
+ * and tell the metadata cache to relocate the entry.
+ * Update fspace->sect_addr, fspace->sect_size, and
+ * fspace->alloc_sect_size to reflect the new location.
+ *
+ * Case 3) Nothing to be done in this case, although it is useful
+ * to perform sanity checks.
+ *
+ * Note that while we may alter the contents of the free space
+ * header in cases 1) and 2), there is no need to mark the header
+ * as dirty, as the metadata cache would not be attempting to
+ * serialize the header if it thought it was clean.
*/
- HDassert(!destroy);
-
- /* Check if the section info is dirty */
- if(fspace->sinfo->dirty) {
- if(fspace->serial_sect_count > 0) {
- /* Check if we need to allocate space for section info */
- if(H5F_IS_TMP_ADDR(f, fspace->sect_addr) || !H5F_addr_defined(fspace->sect_addr)) {
- /* Sanity check */
- HDassert(fspace->sect_size > 0);
-
- /* Allocate space for the section info in file */
- if(HADDR_UNDEF == (fspace->sect_addr = H5MF_alloc(f, H5FD_MEM_FSPACE_SINFO, dxpl_id, fspace->sect_size)))
- HGOTO_ERROR(H5E_FSPACE, H5E_NOSPACE, FAIL, "file allocation failed for free space sections")
- fspace->alloc_sect_size = (size_t)fspace->sect_size;
-
- /* Mark header dirty */
- /* (don't use cache API, since we're in a callback) */
- fspace->cache_info.is_dirty = TRUE;
- } /* end if */
-
- /* Write section info to file */
- if(H5FS_cache_sinfo_flush(f, dxpl_id, FALSE, fspace->sect_addr, fspace->sinfo, NULL) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTFLUSH, FAIL, "unable to save free space section info to disk")
+ if(fspace->serial_sect_count > 0 && H5F_addr_defined(fspace->addr)) {
+ /* Sanity check */
+ HDassert(fspace->sect_size > 0);
+
+ if(!H5F_addr_defined(fspace->sect_addr)) { /* case 1 */
+ /* allocate file space for the section info, and insert it
+ * into the metadata cache.
+ */
+ if(HADDR_UNDEF == (fspace->sect_addr = H5MF_alloc((H5F_t *)f, H5FD_MEM_FSPACE_SINFO, dxpl_id, fspace->sect_size)))
+ HGOTO_ERROR(H5E_FSPACE, H5E_NOSPACE, FAIL, "file allocation failed for free space sections")
+
+ fspace->alloc_sect_size = (size_t)fspace->sect_size;
+ if(H5AC_insert_entry((H5F_t *)f, dxpl_id, H5AC_FSPACE_SINFO, fspace->sect_addr, fspace->sinfo, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTINIT, FAIL, "can't add free space sections to cache")
+
+ HDassert(fspace->sinfo->cache_info.size == fspace->alloc_sect_size);
+
+ /* the metadata cache is now managing the section info,
+ * so set fspace->sinfo to NULL.
+ */
+ fspace->sinfo = NULL;
} /* end if */
-
- /* Mark section info clean */
- fspace->sinfo->dirty = FALSE;
- } /* end if */
+ else if(H5F_IS_TMP_ADDR(f, fspace->sect_addr)) { /* case 2 */
+ haddr_t new_sect_addr;
+
+ /* move the section info from temporary (AKA imaginary) file
+ * space to real file space.
+ */
+
+ /* if my reading of the code is correct, this should always
+ * be the case. If not, we will have to add code to resize
+ * file space allocation for section info as well as moving it.
+ */
+ HDassert(fspace->sect_size > 0);
+ HDassert(fspace->alloc_sect_size == (size_t)fspace->sect_size);
+
+ /* Allocate space for the section info in file */
+ if(HADDR_UNDEF == (new_sect_addr = H5MF_alloc((H5F_t *)f, H5FD_MEM_FSPACE_SINFO, dxpl_id, fspace->sect_size)))
+ HGOTO_ERROR(H5E_FSPACE, H5E_NOSPACE, FAIL, "file allocation failed for free space sections")
+
+ fspace->alloc_sect_size = (size_t)fspace->sect_size;
+ HDassert(fspace->sinfo->cache_info.size == fspace->alloc_sect_size);
+
+ /* Let the metadata cache know the section info moved */
+ if(H5AC_move_entry((H5F_t *)f, H5AC_FSPACE_SINFO, fspace->sect_addr, new_sect_addr) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTMOVE, FAIL, "unable to move section info")
+
+ fspace->sect_addr = new_sect_addr;
+ } /* end else-if */
+ else { /* case 3 -- nothing to do but sanity checking */
+ /* if my reading of the code is correct, this should always
+ * be the case. If not, we will have to add code to resize
+ * file space allocation for section info.
+ */
+ HDassert(fspace->sect_size > 0);
+ HDassert(fspace->alloc_sect_size == (size_t)fspace->sect_size);
+ } /* end else */
+ } /* end if */
+ else {
+ /* for one reason or another (see comment above) there should
+ * not be any file space allocated for the section info.
+ */
+ HDassert(!H5F_addr_defined(fspace->sect_addr));
+ } /* end else */
} /* end if */
- else if(fspace->serial_sect_count > 0)
- /* Sanity check that section info has address */
- HDassert(H5F_addr_defined(fspace->sect_addr));
+ else if(H5F_addr_defined(fspace->sect_addr)) {
+ /* Here the metadata cache is managing the section info.
+ *
+ * Do some sanity checks, and then test to see if the section
+ * info is in real file space. If it isn't, relocate it into
+ * real file space lest the header be written to file with
+ * a nonsense section info address.
+ */
+ HDassert(fspace->serial_sect_count > 0);
+ HDassert(fspace->sect_size > 0);
+ HDassert(fspace->alloc_sect_size == (size_t)fspace->sect_size);
+
+ if(H5F_IS_TMP_ADDR(f, fspace->sect_addr)) {
+ unsigned sect_status = 0;
+ haddr_t new_sect_addr;
+
+ /* we have work to do -- must relocate section info into
+ * real file space.
+ *
+ * Since the section info address is in temporary space (AKA
+ * imaginary space), it follows that the entry must be in
+ * cache. Further, since fspace->sinfo is NULL, it must be
+ * unprotected and un-pinned. Start by verifying this.
+ */
+ if(H5AC_get_entry_status(f, fspace->sect_addr, &sect_status) < 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "can't get section info status")
+
+ HDassert(sect_status & H5AC_ES__IN_CACHE);
+ HDassert((sect_status & H5AC_ES__IS_PROTECTED) == 0);
+ HDassert((sect_status & H5AC_ES__IS_PINNED) == 0);
- if(fspace->cache_info.is_dirty) {
- uint8_t *hdr; /* Pointer to header buffer */
- uint8_t *p; /* Pointer into raw data buffer */
- uint32_t metadata_chksum; /* Computed metadata checksum value */
+ /* Allocate space for the section info in file */
+ if(HADDR_UNDEF == (new_sect_addr = H5MF_alloc((H5F_t *)f, H5FD_MEM_FSPACE_SINFO, dxpl_id, fspace->sect_size)))
+ HGOTO_ERROR(H5E_FSPACE, H5E_NOSPACE, FAIL, "file allocation failed for free space sections")
- /* Wrap the local buffer for serialized header info */
- if(NULL == (wb = H5WB_wrap(hdr_buf, sizeof(hdr_buf))))
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTINIT, FAIL, "can't wrap buffer")
+ fspace->alloc_sect_size = (size_t)fspace->sect_size;
- /* Get a pointer to a buffer that's large enough for header */
- if(NULL == (hdr = (uint8_t *)H5WB_actual(wb, fspace->hdr_size)))
- HGOTO_ERROR(H5E_FSPACE, H5E_NOSPACE, FAIL, "can't get actual buffer")
+ /* Sanity check */
+ HDassert(!H5F_addr_eq(fspace->sect_addr, new_sect_addr));
- /* Get temporary pointer to header */
- p = hdr;
+ /* Let the metadata cache know the section info moved */
+ if(H5AC_move_entry((H5F_t *)f, H5AC_FSPACE_SINFO, fspace->sect_addr, new_sect_addr) < 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTMOVE, FAIL, "unable to move section info")
- /* Magic number */
- HDmemcpy(p, H5FS_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC);
- p += H5_SIZEOF_MAGIC;
+ /* Update the internal address for the section info */
+ fspace->sect_addr = new_sect_addr;
- /* Version # */
- *p++ = H5FS_HDR_VERSION;
+ /* No need to mark the header dirty, as we are about to
+ * serialize it.
+ */
+ } /* end if */
+ } /* end else-if */
+ else { /* there is no section info at present */
+ /* do some sanity checks */
+ HDassert(fspace->serial_sect_count == 0);
+ HDassert(fspace->tot_sect_count == fspace->ghost_sect_count);
+ } /* end else */
- /* Client ID */
- *p++ = fspace->client;
+ /* whatever happened above, set *flags to 0 */
+ *flags = 0;
- /* Total space tracked */
- H5F_ENCODE_LENGTH(f, p, fspace->tot_space);
+done:
+ FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
+} /* end H5FS__cache_hdr_pre_serialize() */
- /* Total # of free space sections tracked */
- H5F_ENCODE_LENGTH(f, p, fspace->tot_sect_count);
+
+/*-------------------------------------------------------------------------
+ * Function: H5FS__cache_hdr_serialize
+ *
+ * Purpose: Given an instance of H5FS_t and a suitably sized buffer,
+ * serialize the contents of the instance of H5FS_t and write
+ * its contents to the buffer. This buffer will be used to
+ * write the image of the instance to file.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 6/21/14
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FS__cache_hdr_serialize(const H5F_t *f, void *_image, size_t len,
+ void *_thing)
+{
+ H5FS_t *fspace = (H5FS_t *)_thing; /* Pointer to the object */
+ uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t metadata_chksum; /* Computed metadata checksum value */
+ herr_t ret_value = SUCCEED; /* Return value */
- /* # of serializable free space sections tracked */
- H5F_ENCODE_LENGTH(f, p, fspace->serial_sect_count);
+ FUNC_ENTER_STATIC_NOERR
- /* # of ghost free space sections tracked */
- H5F_ENCODE_LENGTH(f, p, fspace->ghost_sect_count);
+ /* Check arguments */
+ HDassert(f);
+ HDassert(image);
+ HDassert(fspace);
+ HDassert(fspace->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(fspace->cache_info.type == H5AC_FSPACE_HDR);
+ HDassert(fspace->hdr_size == len);
+
+ /* The section information does not always exist, and if it does,
+ * it is not always in the cache. To make matters more interesting,
+ * even if it is in the cache, it may not be in real file space.
+ *
+ * The pre-serialize function should have moved the section info
+ * into real file space if necessary before this function was called.
+ * The following asserts are a cursory check on this.
+ */
+ HDassert((! H5F_addr_defined(fspace->sect_addr)) || (! H5F_IS_TMP_ADDR(f, fspace->sect_addr)));
+ HDassert((! H5F_addr_defined(fspace->sect_addr)) ||
+ ((fspace->serial_sect_count > 0) &&
+ (fspace->sect_size > 0) &&
+ (fspace->alloc_sect_size == (size_t)fspace->sect_size)));
- /* # of section classes */
- UINT16ENCODE(p, fspace->nclasses);
+ /* Magic number */
+ HDmemcpy(image, H5FS_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC);
+ image += H5_SIZEOF_MAGIC;
- /* Shrink percent */
- UINT16ENCODE(p, fspace->shrink_percent);
+ /* Version # */
+ *image++ = H5FS_HDR_VERSION;
- /* Expand percent */
- UINT16ENCODE(p, fspace->expand_percent);
+ /* Client ID */
+ *image++ = fspace->client;
- /* Size of address space free space sections are within (log2 of actual value) */
- UINT16ENCODE(p, fspace->max_sect_addr);
+ /* Total space tracked */
+ H5F_ENCODE_LENGTH(f, image, fspace->tot_space);
- /* Max. size of section to track */
- H5F_ENCODE_LENGTH(f, p, fspace->max_sect_size);
+ /* Total # of free space sections tracked */
+ H5F_ENCODE_LENGTH(f, image, fspace->tot_sect_count);
- /* Address of serialized free space sections */
- H5F_addr_encode(f, &p, fspace->sect_addr);
+ /* # of serializable free space sections tracked */
+ H5F_ENCODE_LENGTH(f, image, fspace->serial_sect_count);
- /* Size of serialized free space sections */
- H5F_ENCODE_LENGTH(f, p, fspace->sect_size);
+ /* # of ghost free space sections tracked */
+ H5F_ENCODE_LENGTH(f, image, fspace->ghost_sect_count);
- /* Allocated size of serialized free space sections */
- H5F_ENCODE_LENGTH(f, p, fspace->alloc_sect_size);
+ /* # of section classes */
+ UINT16ENCODE(image, fspace->nclasses);
- /* Compute checksum */
- metadata_chksum = H5_checksum_metadata(hdr, (size_t)(p - (uint8_t *)hdr), 0);
+ /* Shrink percent */
+ UINT16ENCODE(image, fspace->shrink_percent);
- /* Metadata checksum */
- UINT32ENCODE(p, metadata_chksum);
+ /* Expand percent */
+ UINT16ENCODE(image, fspace->expand_percent);
- /* Write the free space header. */
- HDassert((size_t)(p - hdr) == fspace->hdr_size);
- if(H5F_block_write(f, H5FD_MEM_FSPACE_HDR, addr, fspace->hdr_size, dxpl_id, hdr) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTFLUSH, FAIL, "unable to save free space header to disk")
+ /* Size of address space free space sections are within (log2 of
+ * actual value)
+ */
+ UINT16ENCODE(image, fspace->max_sect_addr);
- fspace->cache_info.is_dirty = FALSE;
- } /* end if */
+ /* Max. size of section to track */
+ H5F_ENCODE_LENGTH(f, image, fspace->max_sect_size);
- if(destroy)
- if(H5FS_cache_hdr_dest(f, fspace) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTFREE, FAIL, "unable to destroy free space header")
+ /* Address of serialized free space sections */
+ H5F_addr_encode(f, &image, fspace->sect_addr);
-done:
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- HDONE_ERROR(H5E_FSPACE, H5E_CLOSEERROR, FAIL, "can't close wrapped buffer")
+ /* Size of serialized free space sections */
+ H5F_ENCODE_LENGTH(f, image, fspace->sect_size);
+
+ /* Allocated size of serialized free space sections */
+ H5F_ENCODE_LENGTH(f, image, fspace->alloc_sect_size);
+
+ /* Compute checksum */
+ metadata_chksum = H5_checksum_metadata((uint8_t *)_image, (size_t)(image - (uint8_t *)_image), 0);
+
+ /* Metadata checksum */
+ UINT32ENCODE(image, metadata_chksum);
+
+ /* sanity checks */
+ HDassert((size_t)(image - (uint8_t *)_image) == fspace->hdr_size);
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5FS_cache_hdr_flush() */
+} /* H5FS__cache_hdr_serialize() */
+
+/***************************************/
+/* no H5FS__cache_hdr_notify() function */
+/***************************************/
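
The pre-serialize comment above enumerates the states the section info can be in when the header is flushed. The standalone C model below captures just that decision, which action the callback must take so the header never reaches disk with an undefined or temporary section-info address; all names are hypothetical and the real H5MF_alloc()/H5AC_insert_entry()/H5AC_move_entry() calls are omitted.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of the header state inspected by pre_serialize. */
typedef struct {
    bool owns_sinfo;        /* fspace->sinfo != NULL                  */
    bool hdr_addr_defined;  /* H5F_addr_defined(fspace->addr)         */
    bool sect_addr_defined; /* H5F_addr_defined(fspace->sect_addr)    */
    bool sect_addr_is_tmp;  /* H5F_IS_TMP_ADDR(f, fspace->sect_addr)  */
    unsigned long serial_sect_count;
} toy_fs_hdr_t;

typedef enum {
    ACTION_NONE,             /* nothing to do (case 3, or no section info) */
    ACTION_ALLOC_AND_INSERT, /* case 1: alloc real space, insert in cache  */
    ACTION_ALLOC_AND_MOVE    /* case 2: alloc real space, move cache entry */
} toy_action_t;

/* Decide what pre_serialize must do so the header never goes to disk with
 * a temporary or undefined section-info address. */
static toy_action_t toy_pre_serialize_action(const toy_fs_hdr_t *h)
{
    if (h->owns_sinfo) {
        if (h->serial_sect_count == 0 || !h->hdr_addr_defined)
            return ACTION_NONE;                 /* nothing persistent      */
        if (!h->sect_addr_defined)
            return ACTION_ALLOC_AND_INSERT;     /* case 1: floating sinfo  */
        if (h->sect_addr_is_tmp)
            return ACTION_ALLOC_AND_MOVE;       /* case 2: tmp file space  */
        return ACTION_NONE;                     /* case 3: already real    */
    }
    if (h->sect_addr_defined && h->sect_addr_is_tmp)
        return ACTION_ALLOC_AND_MOVE;           /* cache-managed, tmp addr */
    return ACTION_NONE;
}

int main(void)
{
    toy_fs_hdr_t floating = { true, true, false, false, 4 };
    printf("floating sinfo -> action %d\n", (int)toy_pre_serialize_action(&floating));
    return 0;
}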
/*-------------------------------------------------------------------------
- * Function: H5FS_cache_hdr_dest
+ * Function: H5FS__cache_hdr_free_icr
*
* Purpose: Destroys a free space header in memory.
*
- * Return: Non-negative on success/Negative on failure
+ * Note: The metadata cache sets the object's cache_info.magic to
+ * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr
+ * callback (checked in assert).
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
* Programmer: Quincey Koziol
* koziol@ncsa.uiuc.edu
@@ -436,175 +733,133 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5FS_cache_hdr_dest(H5F_t *f, H5FS_t *fspace)
+H5FS__cache_hdr_free_icr(void *_thing)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ H5FS_t *fspace = (H5FS_t *)_thing; /* Pointer to the object */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
- /* Check arguments */
+ /* Sanity checks */
HDassert(fspace);
+ HDassert(fspace->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC);
+ HDassert(fspace->cache_info.type == H5AC_FSPACE_HDR);
/* We should not still be holding on to the free space section info */
HDassert(!fspace->sinfo);
- /* If we're going to free the space on disk, the address must be valid */
- HDassert(!fspace->cache_info.free_file_space_on_destroy || H5F_addr_defined(fspace->cache_info.addr));
-
- /* Check for freeing file space for free space header */
- if(fspace->cache_info.free_file_space_on_destroy) {
- /* Sanity check */
- HDassert(H5F_addr_defined(fspace->addr));
-
- /* Release the space on disk */
- /* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_FSPACE_HDR, H5AC_dxpl_id, fspace->cache_info.addr, (hsize_t)fspace->hdr_size) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTFREE, FAIL, "unable to free free space header")
- } /* end if */
-
/* Destroy free space header */
if(H5FS__hdr_dest(fspace) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTFREE, FAIL, "unable to destroy free space header")
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5FS_cache_hdr_dest() */
+} /* end H5FS__cache_hdr_free_icr() */
-
-/*-------------------------------------------------------------------------
- * Function: H5FS_cache_hdr_clear
- *
- * Purpose: Mark a free space header in memory as non-dirty.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * May 2 2006
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5FS_cache_hdr_clear(H5F_t *f, H5FS_t *fspace, hbool_t destroy)
-{
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT
-
- /*
- * Check arguments.
- */
- HDassert(fspace);
-
- /* Reset the dirty flag. */
- fspace->cache_info.is_dirty = FALSE;
-
- if(destroy)
- if(H5FS_cache_hdr_dest(f, fspace) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTFREE, FAIL, "unable to destroy free space header")
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5FS_cache_hdr_clear() */
+/********************************************************/
+/* metadata cache callback definitions for section info */
+/********************************************************/
/*-------------------------------------------------------------------------
- * Function: H5FS_cache_hdr_size
+ * Function: H5FS__cache_sinfo_get_load_size
*
- * Purpose: Compute the size in bytes of a free space header
- * on disk, and return it in *size_ptr. On failure,
- * the value of *size_ptr is undefined.
+ * Purpose: Compute the size of the on disk image of the free space
+ * manager section info, and place this value in *image_len.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * May 2 2006
+ * Programmer: John Mainzer
+ * 7/7/14
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5FS_cache_hdr_size(const H5F_t H5_ATTR_UNUSED *f, const H5FS_t *fspace, size_t *size_ptr)
+static herr_t
+H5FS__cache_sinfo_get_load_size(const void *_udata, size_t *image_len)
{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ const H5FS_t *fspace; /* free space manager */
+ const H5FS_sinfo_cache_ud_t *udata = (const H5FS_sinfo_cache_ud_t *)_udata; /* User data for callback */
- /* check arguments */
- HDassert(f);
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(udata);
+ fspace = udata->fspace;
HDassert(fspace);
- HDassert(size_ptr);
+ HDassert(fspace->sect_size > 0);
+ HDassert(image_len);
- /* Set size value */
- *size_ptr = fspace->hdr_size;
+ *image_len = (size_t)(fspace->sect_size);
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5FS_cache_hdr_size() */
+} /* end H5FS__cache_sinfo_get_load_size() */
/*-------------------------------------------------------------------------
- * Function: H5FS_cache_sinfo_load
+ * Function: H5FS__cache_sinfo_deserialize
*
- * Purpose: Loads free space sections from the disk.
+ * Purpose: Given a buffer containing the on disk image of the free space
+ * manager section info, allocate an instance of H5FS_sinfo_t, load
+ * it with the data contained in the image, and return a pointer to
+ * the new instance.
*
- * Return: Success: Pointer to a new free space section info
- * Failure: NULL
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * July 31 2006
+ * Return: Success: Pointer to in core representation
+ * Failure: NULL
+ *
+ * Programmer: John Mainzer
+ * 7/7/14
*
*-------------------------------------------------------------------------
*/
-static H5FS_sinfo_t *
-H5FS_cache_sinfo_load(H5F_t *f, hid_t dxpl_id, haddr_t H5_ATTR_UNUSED addr, void *_udata)
+static void *
+H5FS__cache_sinfo_deserialize(const void *_image, size_t len, void *_udata,
+ hbool_t *dirty)
{
- H5FS_sinfo_t *sinfo = NULL; /* Free space section info */
- H5FS_sinfo_cache_ud_t *udata = (H5FS_sinfo_cache_ud_t *)_udata; /* user data for callback */
- haddr_t fs_addr; /* Free space header address */
- size_t old_sect_size; /* Old section size */
- uint8_t *buf = NULL; /* Temporary buffer */
- const uint8_t *p; /* Pointer into raw data buffer */
- uint32_t stored_chksum; /* Stored metadata checksum value */
- uint32_t computed_chksum; /* Computed metadata checksum value */
- H5FS_sinfo_t *ret_value; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT
-
- /* Check arguments */
- HDassert(f);
+ H5FS_sinfo_cache_ud_t *udata = (H5FS_sinfo_cache_ud_t *)_udata; /* User data for callback */
+ H5FS_t *fspace; /* free space manager */
+ H5FS_sinfo_t *sinfo = NULL; /* Free space section info */
+ haddr_t fs_addr; /* Free space header address */
+ size_t old_sect_size; /* Old section size */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t stored_chksum; /* Stored metadata checksum */
+ uint32_t computed_chksum; /* Computed metadata checksum */
+ void * ret_value; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(image);
HDassert(udata);
+ fspace = udata->fspace;
+ HDassert(fspace);
+ HDassert(fspace->sect_size == len);
+ HDassert(dirty);
/* Allocate a new free space section info */
- if(NULL == (sinfo = H5FS_sinfo_new(udata->f, udata->fspace)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
-
- /* Allocate space for the buffer to serialize the sections into */
- H5_CHECKED_ASSIGN(old_sect_size, size_t, udata->fspace->sect_size, hsize_t);
- if(NULL == (buf = H5FL_BLK_MALLOC(sect_block, (size_t)udata->fspace->sect_size)))
+ if(NULL == (sinfo = H5FS_sinfo_new(udata->f, fspace)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
- /* Read buffer from disk */
- if(H5F_block_read(f, H5FD_MEM_FSPACE_SINFO, udata->fspace->sect_addr, (size_t)udata->fspace->sect_size, dxpl_id, buf) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_READERROR, NULL, "can't read free space sections")
-
- /* Deserialize free sections from buffer available */
- p = buf;
+ /* initialize old_sect_size */
+ H5_CHECKED_ASSIGN(old_sect_size, size_t, udata->fspace->sect_size, hsize_t);
/* Magic number */
- if(HDmemcmp(p, H5FS_SINFO_MAGIC, (size_t)H5_SIZEOF_MAGIC))
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTLOAD, NULL, "wrong free space sections signature")
- p += H5_SIZEOF_MAGIC;
+ if(HDmemcmp(image, H5FS_SINFO_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTLOAD, NULL, "wrong free space sections signature")
+ image += H5_SIZEOF_MAGIC;
/* Version */
- if(*p++ != H5FS_SINFO_VERSION)
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTLOAD, NULL, "wrong free space sections version")
+ if(*image++ != H5FS_SINFO_VERSION)
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTLOAD, NULL, "wrong free space sections version")
/* Address of free space header for these sections */
- H5F_addr_decode(udata->f, &p, &fs_addr);
+ H5F_addr_decode(udata->f, &image, &fs_addr);
if(H5F_addr_ne(fs_addr, udata->fspace->addr))
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTLOAD, NULL, "incorrect header address for free space sections")
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTLOAD, NULL, "incorrect header address for free space sections")
/* Check for any serialized sections */
- if(udata->fspace->serial_sect_count > 0) {
+ if(fspace->serial_sect_count > 0) {
hsize_t old_tot_sect_count; /* Total section count from header */
hsize_t old_serial_sect_count; /* Total serializable section count from header */
hsize_t old_ghost_sect_count; /* Total ghost section count from header */
@@ -615,27 +870,27 @@ H5FS_cache_sinfo_load(H5F_t *f, hid_t dxpl_id, haddr_t H5_ATTR_UNUSED addr, void
sect_cnt_size = H5VM_limit_enc_size((uint64_t)udata->fspace->serial_sect_count);
/* Reset the section count, the "add" routine will update it */
- old_tot_sect_count = udata->fspace->tot_sect_count;
- old_serial_sect_count = udata->fspace->serial_sect_count;
- old_ghost_sect_count = udata->fspace->ghost_sect_count;
- old_tot_space = udata->fspace->tot_space;
- udata->fspace->tot_sect_count = 0;
- udata->fspace->serial_sect_count = 0;
- udata->fspace->ghost_sect_count = 0;
- udata->fspace->tot_space = 0;
-
- /* Walk through the buffer, deserializing sections */
+ old_tot_sect_count = fspace->tot_sect_count;
+ old_serial_sect_count = fspace->serial_sect_count;
+ old_ghost_sect_count = fspace->ghost_sect_count;
+ old_tot_space = fspace->tot_space;
+ fspace->tot_sect_count = 0;
+ fspace->serial_sect_count = 0;
+ fspace->ghost_sect_count = 0;
+ fspace->tot_space = 0;
+
+ /* Walk through the image, deserializing sections */
do {
hsize_t sect_size; /* Current section size */
size_t node_count; /* # of sections of this size */
size_t u; /* Local index variable */
/* The number of sections of this node's size */
- UINT64DECODE_VAR(p, node_count, sect_cnt_size);
+ UINT64DECODE_VAR(image, node_count, sect_cnt_size);
HDassert(node_count);
/* The size of the sections for this node */
- UINT64DECODE_VAR(p, sect_size, sinfo->sect_len_size);
+ UINT64DECODE_VAR(image, sect_size, sinfo->sect_len_size);
HDassert(sect_size);
/* Loop over nodes of this size */
@@ -646,324 +901,306 @@ H5FS_cache_sinfo_load(H5F_t *f, hid_t dxpl_id, haddr_t H5_ATTR_UNUSED addr, void
unsigned des_flags; /* Flags from deserialize callback */
/* The address of the section */
- UINT64DECODE_VAR(p, sect_addr, sinfo->sect_off_size);
+ UINT64DECODE_VAR(image, sect_addr, sinfo->sect_off_size);
/* The type of this section */
- sect_type = *p++;
+ sect_type = *image++;
/* Call 'deserialize' callback for this section */
des_flags = 0;
HDassert(udata->fspace->sect_cls[sect_type].deserialize);
- if(NULL == (new_sect = (*udata->fspace->sect_cls[sect_type].deserialize)(&udata->fspace->sect_cls[sect_type], udata->dxpl_id, p, sect_addr, sect_size, &des_flags)))
+ if(NULL == (new_sect = (*fspace->sect_cls[sect_type].deserialize) (&fspace->sect_cls[sect_type], udata->dxpl_id, image, sect_addr, sect_size, &des_flags)))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTDECODE, NULL, "can't deserialize section")
- /* Update offset in serialization buffer */
- p += udata->fspace->sect_cls[sect_type].serial_size;
+ /* Update offset in serialization image */
+ image += udata->fspace->sect_cls[sect_type].serial_size;
/* Insert section in free space manager, unless requested not to */
if(!(des_flags & H5FS_DESERIALIZE_NO_ADD))
if(H5FS_sect_add(udata->f, udata->dxpl_id, udata->fspace, new_sect, H5FS_ADD_DESERIALIZING, NULL) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTINSERT, NULL, "can't add section to free space manager")
} /* end for */
- } while(p < ((buf + old_sect_size) - H5FS_SIZEOF_CHKSUM));
+ } while(image < (((const uint8_t *)_image + old_sect_size) - H5FS_SIZEOF_CHKSUM));
/* Sanity check */
- HDassert((size_t)(p - buf) == (old_sect_size - H5FS_SIZEOF_CHKSUM));
- HDassert(old_sect_size == udata->fspace->sect_size);
- HDassert(old_tot_sect_count == udata->fspace->tot_sect_count);
- HDassert(old_serial_sect_count == udata->fspace->serial_sect_count);
- HDassert(old_ghost_sect_count == udata->fspace->ghost_sect_count);
- HDassert(old_tot_space == udata->fspace->tot_space);
+ HDassert((size_t)(image - (const uint8_t *)_image) == (old_sect_size - H5FS_SIZEOF_CHKSUM));
+ HDassert(old_sect_size == fspace->sect_size);
+ HDassert(old_tot_sect_count == fspace->tot_sect_count);
+ HDassert(old_serial_sect_count == fspace->serial_sect_count);
+ HDassert(old_ghost_sect_count == fspace->ghost_sect_count);
+ HDassert(old_tot_space == fspace->tot_space);
} /* end if */
/* Compute checksum on indirect block */
- computed_chksum = H5_checksum_metadata(buf, (size_t)(p - (const uint8_t *)buf), 0);
+ computed_chksum = H5_checksum_metadata((const uint8_t *)_image, (size_t)(image - (const uint8_t *)_image), 0);
/* Metadata checksum */
- UINT32DECODE(p, stored_chksum);
+ UINT32DECODE(image, stored_chksum);
/* Verify checksum */
if(stored_chksum != computed_chksum)
- HGOTO_ERROR(H5E_FSPACE, H5E_BADVALUE, NULL, "incorrect metadata checksum for fractal heap indirect block")
+ HGOTO_ERROR(H5E_FSPACE, H5E_BADVALUE, NULL, "incorrect metadata checksum for free space sections")
/* Sanity check */
- HDassert((size_t)(p - (const uint8_t *)buf) == old_sect_size);
+ HDassert((size_t)(image - (const uint8_t *)_image) == old_sect_size);
/* Set return value */
ret_value = sinfo;
done:
- if(buf)
- buf = H5FL_BLK_FREE(sect_block, buf);
if(!ret_value && sinfo)
if(H5FS_sinfo_dest(sinfo) < 0)
HDONE_ERROR(H5E_FSPACE, H5E_CANTFREE, NULL, "unable to destroy free space info")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5FS_cache_sinfo_load() */ /*lint !e818 Can't make udata a pointer to const */
+} /* end H5FS__cache_sinfo_deserialize() */
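The deserialize loop above leans on HDF5's variable-width integer encoding: H5VM_limit_enc_size() picks how many bytes are needed to hold values up to a given limit (here the serialized section count, offsets, and lengths), and UINT64ENCODE_VAR / UINT64DECODE_VAR then write or read exactly that many bytes, least-significant byte first. A self-contained sketch of the idea (the helper functions are illustrative stand-ins, not the HDF5 macros):

#include <stdint.h>
#include <stdio.h>

/* Number of bytes needed to encode any value up to 'limit'
 * (conceptually what H5VM_limit_enc_size() computes). */
static unsigned bytes_needed(uint64_t limit)
{
    unsigned n = 1;

    while(limit >>= 8)
        n++;
    return n;
}

/* Encode 'val' into exactly 'nbytes' little-endian bytes (the UINT64ENCODE_VAR idea) */
static void encode_var(uint8_t **pp, uint64_t val, unsigned nbytes)
{
    unsigned u;

    for(u = 0; u < nbytes; u++) {
        *(*pp)++ = (uint8_t)(val & 0xff);
        val >>= 8;
    }
}

/* Decode 'nbytes' little-endian bytes (the UINT64DECODE_VAR idea) */
static uint64_t decode_var(const uint8_t **pp, unsigned nbytes)
{
    uint64_t val = 0;
    unsigned u;

    for(u = 0; u < nbytes; u++)
        val |= (uint64_t)*(*pp)++ << (8 * u);
    return val;
}

int main(void)
{
    uint8_t buf[16], *w = buf;
    const uint8_t *r = buf;
    unsigned width = bytes_needed(300);   /* 2 bytes suffice for counts up to 300 */

    encode_var(&w, 42, width);            /* e.g. a section count */
    printf("width=%u decoded=%llu\n", width,
           (unsigned long long)decode_var(&r, width));
    return 0;
}

Because the field widths are fixed per image (they are derived from the counts stored in the free space header), the reader must use the same widths the writer used, which is why the deserialize callback pulls them from the header via the user data.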
/*-------------------------------------------------------------------------
- * Function: H5FS_sinfo_serialize_sect_cb
+ * Function: H5FS__cache_sinfo_image_len
*
- * Purpose: Skip list iterator callback to serialize free space sections
- * of a particular size
+ * Purpose: Compute the size of the data structure on disk and return
+ * it in *image_len.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Quincey Koziol
- * Monday, May 8, 2006
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * August 14, 2013
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5FS_sinfo_serialize_sect_cb(void *_item, void H5_ATTR_UNUSED *key, void *_udata)
+H5FS__cache_sinfo_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- H5FS_section_class_t *sect_cls; /* Class of section */
- H5FS_section_info_t *sect= (H5FS_section_info_t *)_item; /* Free space section to work on */
- H5FS_iter_ud_t *udata = (H5FS_iter_ud_t *)_udata; /* Callback info */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT
-
- /* Check arguments. */
- HDassert(sect);
- HDassert(udata->sinfo);
- HDassert(udata->p);
+ const H5FS_sinfo_t *sinfo = (const H5FS_sinfo_t *)_thing; /* Pointer to the object */
+ const H5FS_t *fspace; /* Free space header */
- /* Get section's class */
- sect_cls = &udata->sinfo->fspace->sect_cls[sect->type];
-
- /* Check if this section should be serialized (i.e. is not a ghost section) */
- if(!(sect_cls->flags & H5FS_CLS_GHOST_OBJ)) {
- /* The address of the section */
- UINT64ENCODE_VAR(*udata->p, sect->addr, udata->sinfo->sect_off_size);
-
- /* The type of this section */
- *(*udata->p)++ = (uint8_t)sect->type;
+ FUNC_ENTER_STATIC_NOERR
- /* Call 'serialize' callback for this section */
- if(sect_cls->serialize) {
- if((*sect_cls->serialize)(sect_cls, sect, *udata->p) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTSERIALIZE, FAIL, "can't syncronize section")
+ /* Sanity checks */
+ HDassert(sinfo);
+ HDassert(sinfo->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(sinfo->cache_info.type == H5AC_FSPACE_SINFO);
+ fspace = sinfo->fspace;
+ HDassert(fspace);
+ HDassert(fspace->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(fspace->cache_info.type == H5AC_FSPACE_HDR);
+ HDassert(image_len);
- /* Update offset in serialization buffer */
- (*udata->p) += sect_cls->serial_size;
- } /* end if */
- else
- HDassert(sect_cls->serial_size == 0);
- } /* end if */
+ /* Set the image length size */
+ H5_CHECKED_ASSIGN(*image_len, size_t, sinfo->fspace->alloc_sect_size, hsize_t);
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5FS_sinfo_serialize_sect_cb() */
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5FS__cache_sinfo_image_len() */
/*-------------------------------------------------------------------------
- * Function: H5FS_sinfo_serialize_node_cb
+ * Function: H5FS__cache_sinfo_pre_serialize
*
- * Purpose: Skip list iterator callback to serialize free space sections
- * in a bin
+ * Purpose: Determine whether file space for the section info is located
+ * in temporary (AKA imaginary) file space. If it is, relocate
+ * the section info to regular file space.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: Quincey Koziol
- * Monday, May 8, 2006
+ * Programmer: John Mainzer
+ * 7/7/14
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5FS_sinfo_serialize_node_cb(void *_item, void H5_ATTR_UNUSED *key, void *_udata)
+static herr_t
+H5FS__cache_sinfo_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *_thing,
+ haddr_t addr, size_t len, size_t H5_ATTR_UNUSED compressed_len, haddr_t *new_addr,
+ size_t *new_len, size_t H5_ATTR_UNUSED *new_compressed_len, unsigned *flags)
{
- H5FS_node_t *fspace_node = (H5FS_node_t *)_item; /* Free space size node to work on */
- H5FS_iter_ud_t *udata = (H5FS_iter_ud_t *)_udata; /* Callback info */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5FS_sinfo_t *sinfo = (H5FS_sinfo_t *)_thing; /* Pointer to the object */
+ H5FS_t *fspace; /* Free space header */
+ haddr_t sinfo_addr; /* Address for section info */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
- /* Check arguments. */
- HDassert(fspace_node);
- HDassert(udata->sinfo);
- HDassert(udata->p);
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(sinfo);
+ HDassert(sinfo->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(sinfo->cache_info.type == H5AC_FSPACE_SINFO);
+ fspace = sinfo->fspace;
+ HDassert(fspace->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(fspace->cache_info.type == H5AC_FSPACE_HDR);
+ HDassert(fspace->cache_info.is_pinned);
+ HDassert(H5F_addr_defined(addr));
+ HDassert(H5F_addr_eq(fspace->sect_addr, addr));
+ HDassert(fspace->sect_size == len);
+ HDassert(new_addr);
+ HDassert(new_len);
+ HDassert(flags);
- /* Check if this node has any serializable sections */
- if(fspace_node->serial_count > 0) {
- /* The number of serializable sections of this node's size */
- UINT64ENCODE_VAR(*udata->p, fspace_node->serial_count, udata->sect_cnt_size);
+ /* we shouldn't be called if the section info is empty */
+ HDassert(fspace->serial_sect_count > 0);
- /* The size of the sections for this node */
- UINT64ENCODE_VAR(*udata->p, fspace_node->sect_size, udata->sinfo->sect_len_size);
+ sinfo_addr = addr; /* this will change if we relocate the section data */
- /* Iterate through all the sections of this size */
- HDassert(fspace_node->sect_list);
- if(H5SL_iterate(fspace_node->sect_list, H5FS_sinfo_serialize_sect_cb, udata) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_BADITER, FAIL, "can't iterate over section nodes")
+ /* Check for section info at temporary address */
+ if(H5F_IS_TMP_ADDR(f, fspace->sect_addr)) {
+ /* Sanity check */
+ HDassert(fspace->sect_size > 0);
+ HDassert(H5F_addr_eq(fspace->sect_addr, addr));
+
+ /* Allocate space for the section info in file */
+ if(HADDR_UNDEF == (sinfo_addr = H5MF_alloc((H5F_t *)f, H5FD_MEM_FSPACE_SINFO, dxpl_id, fspace->sect_size)))
+ HGOTO_ERROR(H5E_FSPACE, H5E_NOSPACE, FAIL, "file allocation failed for free space sections")
+
+ fspace->alloc_sect_size = (size_t)fspace->sect_size;
+
+ /* Sanity check */
+ HDassert(!H5F_addr_eq(sinfo->fspace->sect_addr, sinfo_addr));
+
+ /* Let the metadata cache know the section info moved */
+ if(H5AC_move_entry((H5F_t *)f, H5AC_FSPACE_SINFO, sinfo->fspace->sect_addr, sinfo_addr) < 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTMOVE, FAIL, "unable to move section info")
+
+ /* Update the internal address for the section info */
+ sinfo->fspace->sect_addr = sinfo_addr;
+
+ /* Mark free space header as dirty */
+ if(H5AC_mark_entry_dirty(fspace) < 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTMARKDIRTY, FAIL, "unable to mark free space header as dirty")
+ } /* end if */
+
+ if(!H5F_addr_eq(addr, sinfo_addr)) {
+ *new_addr = sinfo_addr;
+ *flags = H5C__SERIALIZE_MOVED_FLAG;
} /* end if */
+ else
+ *flags = 0;
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5FS_sinfo_serialize_node_cb() */
+} /* end H5FS__cache_sinfo_pre_serialize() */
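The pre_serialize callback above illustrates the contract the new cache expects: the callback may relocate an entry (here, moving the section info out of temporary file space via H5MF_alloc() and H5AC_move_entry()), but it does not write the image itself; it only reports the move back through *new_addr and the H5C__SERIALIZE_MOVED_FLAG so the cache writes to the right place. A self-contained sketch of that contract, with stand-in types and a hypothetical allocator:

#include <stdint.h>
#include <stdio.h>

#define SERIALIZE_MOVED_FLAG 0x1u      /* stand-in for H5C__SERIALIZE_MOVED_FLAG */
#define ADDR_UNDEF ((uint64_t)-1)

typedef struct {
    uint64_t addr;                     /* current on-disk address */
    int      is_temporary;             /* parked in "imaginary" file space? */
} entry_t;

/* Hypothetical allocator handing out real file space */
static uint64_t alloc_real_space(uint64_t size)
{
    static uint64_t next_free = 4096;
    uint64_t ret = next_free;

    next_free += size;
    return ret;
}

/* Shape of a pre_serialize callback: relocate the entry if necessary and
 * report the (possibly new) address back through *new_addr / *flags;
 * the cache performs the actual write afterwards. */
static int pre_serialize(entry_t *entry, uint64_t curr_addr, uint64_t len,
                         uint64_t *new_addr, unsigned *flags)
{
    uint64_t target = curr_addr;

    if(entry->is_temporary) {
        if(ADDR_UNDEF == (target = alloc_real_space(len)))
            return -1;
        entry->addr = target;          /* in-core bookkeeping follows the move */
        entry->is_temporary = 0;
    }

    if(target != curr_addr) {
        *new_addr = target;
        *flags = SERIALIZE_MOVED_FLAG; /* tell the cache the entry moved */
    }
    else
        *flags = 0;

    return 0;
}

int main(void)
{
    entry_t e = { 0, 1 };              /* entry currently in temporary space */
    uint64_t new_addr = ADDR_UNDEF;
    unsigned flags = 0;

    if(pre_serialize(&e, e.addr, 512, &new_addr, &flags) < 0)
        return 1;
    printf("moved=%d new_addr=%llu\n", (flags & SERIALIZE_MOVED_FLAG) ? 1 : 0,
           (unsigned long long)new_addr);
    return 0;
}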
/*-------------------------------------------------------------------------
- * Function: H5FS_cache_sinfo_flush
+ * Function: H5FS__cache_sinfo_serialize
*
- * Purpose: Flushes a dirty free space section info to disk.
+ * Purpose: Given an instance of H5FS_sinfo_t and a suitably sized buffer,
+ * serialize the instance into the buffer. The buffer is then
+ * used to write the image of the instance to file.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * July 31 2006
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5FS_cache_sinfo_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5FS_sinfo_t *sinfo, unsigned H5_ATTR_UNUSED * flags_ptr)
+static herr_t
+H5FS__cache_sinfo_serialize(const H5F_t *f, void *_image, size_t len,
+ void *_thing)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ H5FS_sinfo_t *sinfo = (H5FS_sinfo_t *)_thing; /* Pointer to the object */
+ H5FS_t *fspace; /* Free space header */
+ H5FS_iter_ud_t udata; /* User data for callbacks */
+ uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t metadata_chksum; /* Computed metadata checksum value */
+ unsigned bin; /* Current bin we are on */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
- /* check arguments */
+ /* Sanity checks */
HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ HDassert(image);
HDassert(sinfo);
- HDassert(sinfo->fspace);
- HDassert(sinfo->fspace->sect_cls);
-
- if(sinfo->cache_info.is_dirty || sinfo->dirty) {
- H5FS_iter_ud_t udata; /* User data for callbacks */
- uint8_t *buf = NULL; /* Temporary raw data buffer */
- uint8_t *p; /* Pointer into raw data buffer */
- uint32_t metadata_chksum; /* Computed metadata checksum value */
- unsigned bin; /* Current bin we are on */
-
- /* Sanity check address */
- if(H5F_addr_ne(addr, sinfo->fspace->sect_addr))
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTLOAD, FAIL, "incorrect address for free space sections")
+ HDassert(sinfo->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(sinfo->cache_info.type == H5AC_FSPACE_SINFO);
+ fspace = sinfo->fspace;
+ HDassert(fspace->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(fspace->cache_info.type == H5AC_FSPACE_HDR);
+ HDassert(fspace->cache_info.is_pinned);
+ HDassert(fspace->sect_size == len);
+ HDassert(fspace->sect_cls);
- /* Allocate temporary buffer */
- if((buf = H5FL_BLK_MALLOC(sect_block, (size_t)sinfo->fspace->sect_size)) == NULL)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
-
- p = buf;
-
- /* Magic number */
- HDmemcpy(p, H5FS_SINFO_MAGIC, (size_t)H5_SIZEOF_MAGIC);
- p += H5_SIZEOF_MAGIC;
-
- /* Version # */
- *p++ = H5FS_SINFO_VERSION;
-
- /* Address of free space header for these sections */
- H5F_addr_encode(f, &p, sinfo->fspace->addr);
-
- /* Set up user data for iterator */
- udata.sinfo = sinfo;
- udata.p = &p;
- udata.sect_cnt_size = H5VM_limit_enc_size((uint64_t)sinfo->fspace->serial_sect_count);
-
- /* Iterate over all the bins */
- for(bin = 0; bin < sinfo->nbins; bin++)
- /* Check if there are any sections in this bin */
- if(sinfo->bins[bin].bin_list)
- /* Iterate over list of section size nodes for bin */
- if(H5SL_iterate(sinfo->bins[bin].bin_list, H5FS_sinfo_serialize_node_cb, &udata) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_BADITER, FAIL, "can't iterate over section size nodes")
-
- /* Compute checksum */
- metadata_chksum = H5_checksum_metadata(buf, (size_t)(p - buf), 0);
-
- /* Metadata checksum */
- UINT32ENCODE(p, metadata_chksum);
-
- /* Sanity check */
- HDassert((size_t)(p - buf) == sinfo->fspace->sect_size);
- HDassert(sinfo->fspace->sect_size <= sinfo->fspace->alloc_sect_size);
-
- /* Check for section info at temporary address */
- if(H5F_IS_TMP_ADDR(f, sinfo->fspace->sect_addr)) {
- /* Sanity check */
- HDassert(sinfo->fspace->sect_size > 0);
- HDassert(H5F_addr_eq(sinfo->fspace->sect_addr, addr));
-
- /* Allocate space for the section info in file */
- if(HADDR_UNDEF == (addr = H5MF_alloc(f, H5FD_MEM_FSPACE_SINFO, dxpl_id, sinfo->fspace->sect_size)))
- HGOTO_ERROR(H5E_FSPACE, H5E_NOSPACE, FAIL, "file allocation failed for free space sections")
- sinfo->fspace->alloc_sect_size = (size_t)sinfo->fspace->sect_size;
+ /* Magic number */
+ HDmemcpy(image, H5FS_SINFO_MAGIC, (size_t)H5_SIZEOF_MAGIC);
+ image += H5_SIZEOF_MAGIC;
- /* Sanity check */
- HDassert(!H5F_addr_eq(sinfo->fspace->sect_addr, addr));
+ /* Version # */
+ *image++ = H5FS_SINFO_VERSION;
- /* Let the metadata cache know the section info moved */
- if(H5AC_move_entry(f, H5AC_FSPACE_SINFO, sinfo->fspace->sect_addr, addr) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTMOVE, FAIL, "unable to move indirect block")
+ /* Address of free space header for these sections */
+ H5F_addr_encode(f, &image, sinfo->fspace->addr);
- /* Update the internal address for the section info */
- sinfo->fspace->sect_addr = addr;
+ /* Set up user data for iterator */
+ udata.sinfo = sinfo;
+ udata.image = &image;
+ udata.sect_cnt_size = H5VM_limit_enc_size((uint64_t)sinfo->fspace->serial_sect_count);
- /* Mark free space header as dirty */
- if(H5AC_mark_entry_dirty(sinfo->fspace) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTMARKDIRTY, FAIL, "unable to mark free space header as dirty")
- } /* end if */
+ /* Iterate over all the bins */
+ for(bin = 0; bin < sinfo->nbins; bin++)
+ /* Check if there are any sections in this bin */
+ if(sinfo->bins[bin].bin_list)
+ /* Iterate over list of section size nodes for bin */
+ if(H5SL_iterate(sinfo->bins[bin].bin_list, H5FS__sinfo_serialize_node_cb, &udata) < 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_BADITER, FAIL, "can't iterate over section size nodes")
- /* Write buffer to disk */
- if(H5F_block_write(f, H5FD_MEM_FSPACE_SINFO, sinfo->fspace->sect_addr, (size_t)sinfo->fspace->sect_size, dxpl_id, buf) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTFLUSH, FAIL, "unable to save free space sections to disk")
- buf = H5FL_BLK_FREE(sect_block, buf);
+ /* Compute checksum */
+ metadata_chksum = H5_checksum_metadata(_image, (size_t)(image - (uint8_t *)_image), 0);
- sinfo->cache_info.is_dirty = FALSE;
- sinfo->dirty = FALSE;
- } /* end if */
+ /* Metadata checksum */
+ UINT32ENCODE(image, metadata_chksum);
- if(destroy)
- if(H5FS_cache_sinfo_dest(f, sinfo) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTFREE, FAIL, "unable to destroy free space section info")
+ /* Sanity check */
+ HDassert((size_t)(image - (uint8_t *)_image) == sinfo->fspace->sect_size);
+ HDassert(sinfo->fspace->sect_size <= sinfo->fspace->alloc_sect_size);
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5FS_cache_sinfo_flush() */
+} /* end H5FS__cache_sinfo_serialize() */
/*-------------------------------------------------------------------------
- * Function: H5FS_cache_sinfo_dest
+ * Function: H5FS__cache_sinfo_free_icr
*
- * Purpose: Destroys a free space section info in memory.
+ * Purpose: Free the memory used for the in core representation of the
+ * free space manager section info.
*
- * Return: Non-negative on success/Negative on failure
+ * Note: The metadata cache sets the object's cache_info.magic to
+ * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr
+ * callback (checked in assert).
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * July 31 2006
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5FS_cache_sinfo_dest(H5F_t *f, H5FS_sinfo_t *sinfo)
+static herr_t
+H5FS__cache_sinfo_free_icr(void *_thing)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ H5FS_sinfo_t *sinfo = (H5FS_sinfo_t *)_thing; /* Pointer to the object */
+ H5FS_t *fspace; /* Free space header */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
- /* Check arguments */
+ /* Sanity checks */
HDassert(sinfo);
-
- /* If we're going to free the space on disk, the address must be valid */
- HDassert(!sinfo->cache_info.free_file_space_on_destroy || H5F_addr_defined(sinfo->cache_info.addr));
-
- /* Check for freeing file space for free space section info */
- if(sinfo->cache_info.free_file_space_on_destroy) {
- /* Sanity check */
- HDassert(sinfo->fspace->alloc_sect_size > 0);
-
- /* Release the space on disk */
- /* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(!H5F_IS_TMP_ADDR(f, sinfo->cache_info.addr))
- if(H5MF_xfree(f, H5FD_MEM_FSPACE_SINFO, H5AC_dxpl_id, sinfo->cache_info.addr, (hsize_t)sinfo->fspace->alloc_sect_size) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTFREE, FAIL, "unable to free free space section info")
- } /* end if */
+ HDassert(sinfo->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC);
+ HDassert(sinfo->cache_info.type == H5AC_FSPACE_SINFO);
+ fspace = sinfo->fspace;
+ HDassert(fspace->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(fspace->cache_info.type == H5AC_FSPACE_HDR);
+ HDassert(fspace->cache_info.is_pinned);
/* Destroy free space info */
if(H5FS_sinfo_dest(sinfo) < 0)
@@ -971,73 +1208,107 @@ H5FS_cache_sinfo_dest(H5F_t *f, H5FS_sinfo_t *sinfo)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5FS_cache_sinfo_dest() */
+} /* end H5FS__cache_sinfo_free_icr() */
/*-------------------------------------------------------------------------
- * Function: H5FS_cache_sinfo_clear
+ * Function: H5FS__sinfo_serialize_sect_cb
*
- * Purpose: Mark a free space section info in memory as non-dirty.
+ * Purpose: Skip list iterator callback to serialize free space sections
+ * of a particular size
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * July 31 2006
+ * Monday, May 8, 2006
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5FS_cache_sinfo_clear(H5F_t *f, H5FS_sinfo_t *sinfo, hbool_t destroy)
+H5FS__sinfo_serialize_sect_cb(void *_item, void H5_ATTR_UNUSED *key, void *_udata)
{
+ H5FS_section_class_t *sect_cls; /* Class of section */
+ H5FS_section_info_t *sect= (H5FS_section_info_t *)_item; /* Free space section to work on */
+ H5FS_iter_ud_t *udata = (H5FS_iter_ud_t *)_udata; /* Callback info */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
- /*
- * Check arguments.
- */
- HDassert(sinfo);
+ /* Check arguments. */
+ HDassert(sect);
+ HDassert(udata->sinfo);
+ HDassert(udata->image);
- /* Reset the dirty flag. */
- sinfo->cache_info.is_dirty = FALSE;
+ /* Get section's class */
+ sect_cls = &udata->sinfo->fspace->sect_cls[sect->type];
- if(destroy)
- if(H5FS_cache_sinfo_dest(f, sinfo) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTFREE, FAIL, "unable to destroy free space section info")
+ /* Check if this section should be serialized (i.e. is not a ghost section) */
+ if(!(sect_cls->flags & H5FS_CLS_GHOST_OBJ)) {
+ /* The address of the section */
+ UINT64ENCODE_VAR(*udata->image, sect->addr, udata->sinfo->sect_off_size);
+
+ /* The type of this section */
+ *(*udata->image)++ = (uint8_t)sect->type;
+
+ /* Call 'serialize' callback for this section */
+ if(sect_cls->serialize) {
+ if((*sect_cls->serialize)(sect_cls, sect, *udata->image) < 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTSERIALIZE, FAIL, "can't serialize section")
+
+ /* Update offset in serialization buffer */
+ (*udata->image) += sect_cls->serial_size;
+ } /* end if */
+ else
+ HDassert(sect_cls->serial_size == 0);
+ } /* end if */
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5FS_cache_sinfo_clear() */
+} /* H5FS__sinfo_serialize_sect_cb() */
/*-------------------------------------------------------------------------
- * Function: H5FS_cache_sinfo_size
+ * Function: H5FS__sinfo_serialize_node_cb
*
- * Purpose: Compute the size in bytes of a free space section info
- * on disk, and return it in *size_ptr. On failure,
- * the value of *size_ptr is undefined.
+ * Purpose: Skip list iterator callback to serialize free space sections
+ * in a bin
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * July 31 2006
+ * Monday, May 8, 2006
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5FS_cache_sinfo_size(const H5F_t H5_ATTR_UNUSED *f, const H5FS_sinfo_t *sinfo, size_t *size_ptr)
+H5FS__sinfo_serialize_node_cb(void *_item, void H5_ATTR_UNUSED *key, void *_udata)
{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ H5FS_node_t *fspace_node = (H5FS_node_t *)_item; /* Free space size node to work on */
+ H5FS_iter_ud_t *udata = (H5FS_iter_ud_t *)_udata; /* Callback info */
+ herr_t ret_value = SUCCEED; /* Return value */
- /* check arguments */
- HDassert(sinfo);
- HDassert(size_ptr);
+ FUNC_ENTER_STATIC
+
+ /* Check arguments. */
+ HDassert(fspace_node);
+ HDassert(udata->sinfo);
+ HDassert(udata->image);
- /* Set size value */
- H5_CHECKED_ASSIGN(*size_ptr, size_t, sinfo->fspace->alloc_sect_size, hsize_t);
+ /* Check if this node has any serializable sections */
+ if(fspace_node->serial_count > 0) {
+ /* The number of serializable sections of this node's size */
+ UINT64ENCODE_VAR(*udata->image, fspace_node->serial_count, udata->sect_cnt_size);
- FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5FS_cache_sinfo_size() */
+ /* The size of the sections for this node */
+ UINT64ENCODE_VAR(*udata->image, fspace_node->sect_size, udata->sinfo->sect_len_size);
+
+ /* Iterate through all the sections of this size */
+ HDassert(fspace_node->sect_list);
+ if(H5SL_iterate(fspace_node->sect_list, H5FS__sinfo_serialize_sect_cb, udata) < 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_BADITER, FAIL, "can't iterate over section nodes")
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5FS__sinfo_serialize_node_cb() */
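Taken together, the section info serialize callback and the two skip-list iterator callbacks above imply the following on-disk layout for the section info image (a sketch reconstructed from the code in this patch; widths in parentheses):

    signature                     (H5_SIZEOF_MAGIC bytes, H5FS_SINFO_MAGIC)
    version                       (1 byte, H5FS_SINFO_VERSION)
    free space header address     (file's address size in bytes)
    for each section size with serializable sections:
        section count             (sect_cnt_size bytes, variable width)
        section size              (sect_len_size bytes, variable width)
        for each section of that size:
            section address       (sect_off_size bytes, variable width)
            section type          (1 byte)
            class serialized data (sect_cls[type].serial_size bytes)
    metadata checksum             (4 bytes, H5_checksum_metadata over all of the above)

Ghost sections (H5FS_CLS_GHOST_OBJ) are skipped by the per-section callback, which is why only the serializable section count appears in the image.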
diff --git a/src/H5FSdbg.c b/src/H5FSdbg.c
index b180efd..de66ebd 100644
--- a/src/H5FSdbg.c
+++ b/src/H5FSdbg.c
@@ -121,7 +121,7 @@ H5FS_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int
/*
* Load the free space header.
*/
- if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, addr, &cache_udata, H5AC_READ)))
+ if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTLOAD, FAIL, "unable to load free space header")
/* Print opening message */
@@ -263,7 +263,7 @@ H5FS_sects_debug(H5F_t *f, hid_t dxpl_id, haddr_t H5_ATTR_UNUSED addr, FILE *str
/*
* Load the free space header.
*/
- if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fs_addr, &cache_udata, H5AC_READ)))
+ if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fs_addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTLOAD, FAIL, "unable to load free space header")
/* Retrieve the client id */
diff --git a/src/H5FSpkg.h b/src/H5FSpkg.h
index 78afde8..2c1eba7 100644
--- a/src/H5FSpkg.h
+++ b/src/H5FSpkg.h
@@ -182,7 +182,9 @@ struct H5FS_t {
unsigned sinfo_lock_count; /* # of times the section info has been locked */
hbool_t sinfo_protected; /* Whether the section info was protected when locked */
hbool_t sinfo_modified; /* Whether the section info has been modified while locked */
- H5AC_protect_t sinfo_accmode; /* Access mode for protecting the section info */
+ unsigned sinfo_accmode; /* Access mode for protecting the section info */
+ /* must be either H5C__NO_FLAGS_SET (i.e. r/w) */
+ /* or H5AC__READ_ONLY_FLAG (i.e. r/o). */
size_t max_cls_serial_size; /* Max. additional size of serialized form of section */
hsize_t threshold; /* Threshold for alignment */
hsize_t alignment; /* Alignment */
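The H5FSpkg.h change above replaces the H5AC_protect_t enum (H5AC_READ / H5AC_WRITE) with plain protect flags, so the rest of the patch tests and asserts bits rather than comparing enum values. A tiny self-contained sketch of the mapping and the checks it implies (the flag value and enum names are stand-ins; the real flag is H5AC__READ_ONLY_FLAG):

#include <assert.h>
#include <stdio.h>

#define READ_ONLY_FLAG 0x0400u        /* stand-in value for H5AC__READ_ONLY_FLAG */

/* What the old access-mode enum expressed ... */
typedef enum { ACC_READ, ACC_WRITE } old_accmode_t;

/* ... is now carried as protect flags: 0 means r/w, READ_ONLY_FLAG means r/o */
static unsigned to_flags(old_accmode_t old)
{
    return (ACC_READ == old) ? READ_ONLY_FLAG : 0;
}

static void check_accmode(unsigned accmode)
{
    /* Only the read-only bit may appear in a stored access mode */
    assert(0 == (accmode & ~READ_ONLY_FLAG));

    printf("%s\n", (accmode & READ_ONLY_FLAG) ? "read-only" : "read-write");
}

int main(void)
{
    check_accmode(to_flags(ACC_WRITE));   /* prints "read-write" */
    check_accmode(to_flags(ACC_READ));    /* prints "read-only"  */
    return 0;
}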
diff --git a/src/H5FSsection.c b/src/H5FSsection.c
index 8fe4075..0b47f51 100644
--- a/src/H5FSsection.c
+++ b/src/H5FSsection.c
@@ -204,7 +204,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5FS_sinfo_lock(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace, H5AC_protect_t accmode)
+H5FS_sinfo_lock(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace, unsigned accmode)
{
H5FS_sinfo_cache_ud_t cache_udata; /* User-data for cache callback */
herr_t ret_value = SUCCEED; /* Return value */
@@ -220,14 +220,21 @@ HDfprintf(stderr, "%s: fspace->alloc_sect_size = %Hu, fspace->sect_size = %Hu\n"
HDassert(f);
HDassert(fspace);
+ /* only H5AC__READ_ONLY_FLAG may appear in accmode */
+ HDassert((accmode & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
+
/* If the free space header doesn't already "own" the section info, load
* section info or create it
*/
if(fspace->sinfo) {
/* Check if the section info was protected & we want a different access mode */
+
+ /* only H5AC__READ_ONLY_FLAG may appear in fspace->sinfo_accmode */
+ HDassert(((fspace->sinfo_accmode) & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
+
if(fspace->sinfo_protected && accmode != fspace->sinfo_accmode) {
/* Check if we need to switch from read-only access to read-write */
- if(H5AC_WRITE == accmode) {
+ if(0 == (accmode & (unsigned)(~H5AC__READ_ONLY_FLAG))) {
/* Unprotect the read-only section info */
if(H5AC_unprotect(f, dxpl_id, H5AC_FSPACE_SINFO, fspace->sect_addr, fspace->sinfo, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTUNPROTECT, FAIL, "unable to release free space section info")
@@ -236,11 +243,11 @@ HDfprintf(stderr, "%s: fspace->alloc_sect_size = %Hu, fspace->sect_size = %Hu\n"
cache_udata.f = f;
cache_udata.dxpl_id = dxpl_id;
cache_udata.fspace = fspace;
- if(NULL == (fspace->sinfo = (H5FS_sinfo_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_SINFO, fspace->sect_addr, &cache_udata, H5AC_WRITE)))
+ if(NULL == (fspace->sinfo = (H5FS_sinfo_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_SINFO, fspace->sect_addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTPROTECT, FAIL, "unable to load free space sections")
/* Switch the access mode we have */
- fspace->sinfo_accmode = H5AC_WRITE;
+ fspace->sinfo_accmode = H5AC__NO_FLAGS_SET;
} /* end if */
} /* end if */
} /* end if */
@@ -331,7 +338,7 @@ HDfprintf(stderr, "%s: fspace->alloc_sect_size = %Hu, fspace->sect_size = %Hu\n"
/* Check if we modified any section */
if(modified) {
/* Check if the section info was protected with a different access mode */
- if(fspace->sinfo_protected && fspace->sinfo_accmode != H5AC_WRITE)
+ if(fspace->sinfo_protected && (0 != ((fspace->sinfo_accmode) & H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTDIRTY, FAIL, "attempt to modify read-only section info")
/* If we modified the section info, mark it dirty */
@@ -915,7 +922,7 @@ H5FS_sect_remove(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace,
HDassert(sect);
/* Get a pointer to the section info */
- if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC_WRITE) < 0)
+ if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "can't get section info")
sinfo_valid = TRUE;
@@ -1396,7 +1403,7 @@ HDfprintf(stderr, "%s: *sect = {%a, %Hu, %u, %s}\n", FUNC, sect->addr, sect->siz
HDassert(sect->size);
/* Get a pointer to the section info */
- if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC_WRITE) < 0)
+ if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "can't get section info")
sinfo_valid = TRUE;
@@ -1494,7 +1501,7 @@ HDfprintf(stderr, "%s: fspace->ghost_sect_count = %Hu\n", FUNC, fspace->ghost_se
H5FS_section_info_t *sect; /* Temporary free space section */
/* Get a pointer to the section info */
- if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC_WRITE) < 0)
+ if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "can't get section info")
sinfo_valid = TRUE;
@@ -1613,7 +1620,7 @@ H5FS_sect_try_merge(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace, H5FS_section_info_t
HDassert(sect->size);
/* Get a pointer to the section info */
- if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC_WRITE) < 0)
+ if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "can't get section info")
sinfo_valid = TRUE;
saved_fs_size = sect->size;
@@ -1849,7 +1856,7 @@ HDfprintf(stderr, "%s: fspace->ghost_sect_count = %Hu\n", FUNC, fspace->ghost_se
#endif /* QAK */
if(fspace->tot_sect_count > 0) {
/* Get a pointer to the section info */
- if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC_WRITE) < 0)
+ if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "can't get section info")
sinfo_valid = TRUE;
@@ -1997,7 +2004,7 @@ HDfprintf(stderr, "%s: fspace->tot_sect_count = %Hu\n", FUNC, fspace->tot_sect_c
unsigned bin; /* Current bin we are on */
/* Get a pointer to the section info */
- if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC_READ) < 0)
+ if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC__READ_ONLY_FLAG) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "can't get section info")
sinfo_valid = TRUE;
@@ -2089,7 +2096,7 @@ H5FS_sect_change_class(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace,
HDassert(new_class < fspace->nclasses);
/* Get a pointer to the section info */
- if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC_WRITE) < 0)
+ if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "can't get section info")
sinfo_valid = TRUE;
@@ -2403,7 +2410,7 @@ H5FS_sect_try_shrink_eoa(const H5F_t *f, hid_t dxpl_id, const H5FS_t *fspace, vo
/* Check arguments. */
HDassert(fspace);
- if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC_WRITE) < 0)
+ if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "can't get section info")
sinfo_valid = TRUE;
diff --git a/src/H5Fint.c b/src/H5Fint.c
index ae6551e..c9e1ac0 100644
--- a/src/H5Fint.c
+++ b/src/H5Fint.c
@@ -803,6 +803,14 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")
} /* end if */
+ /* if it exists, unpin the driver information block cache entry,
+ * since we're about to destroy the cache
+ */
+ if(f->shared->drvinfo)
+ if(H5AC_unpin_entry(f->shared->drvinfo) < 0)
+ /* Push error, but keep going*/
+ HDONE_ERROR(H5E_FSPACE, H5E_CANTUNPIN, FAIL, "unable to unpin drvinfo")
+
/* Unpin the superblock, since we're about to destroy the cache */
if(H5AC_unpin_entry(f->shared->sblock) < 0)
/* Push error, but keep going*/
@@ -857,14 +865,6 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
/* Push error, but keep going*/
HDONE_ERROR(H5E_FILE, H5E_CANTDEC, FAIL, "can't close property list")
- /* Only truncate the file on an orderly close, with write-access */
- if(f->closing && (H5F_ACC_RDWR & H5F_INTENT(f))) {
- /* Truncate the file to the current allocated size */
- if(H5FD_truncate(f->shared->lf, dxpl_id, (unsigned)TRUE) < 0)
- /* Push error, but keep going*/
- HDONE_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "low level truncate failed")
- } /* end if */
-
/* Close the file */
if(H5FD_close(f->shared->lf) < 0)
/* Push error, but keep going*/
@@ -1182,6 +1182,15 @@ H5F_flush(H5F_t *f, hid_t dxpl_id, hbool_t closing)
/* Push error, but keep going*/
HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush metadata cache")
+ /* Truncate the file to the current allocated size */
+ if(H5FD_truncate(f->shared->lf, dxpl_id, closing) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "low level truncate failed")
+
+ /* Flush the entire metadata cache again since the EOA could have changed in the truncate call. */
+ if(H5AC_flush(f, dxpl_id) < 0)
+ /* Push error, but keep going*/
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush metadata cache")
+
/* Set up I/O info for operation */
fio_info.f = f;
if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h
index 9f70e12..175fa88 100644
--- a/src/H5Fpkg.h
+++ b/src/H5Fpkg.h
@@ -81,6 +81,21 @@
#define H5F_SUPERBLOCK_FIXED_SIZE ( H5F_SIGNATURE_LEN \
+ 1) /* superblock version */
+/* H5F_SUPERBLOCK_MINIMAL_VARLEN_SIZE is the minimal amount of superblock
+ * variable length data guaranteed to include the "size of offsets" and
+ * "size of lengths" fields in all versions of the superblock.
+ *
+ * This is necessary in the V3 cache: on the initial load we need to read
+ * enough of the superblock to determine its version and size, so that
+ * the metadata cache can load the correct amount of data from the file
+ * and allow the second deserialization attempt to succeed.
+ *
+ * The value selected will have to be revisited for each new version
+ * of the superblock. Note that the current value is one byte larger
+ * than it needs to be.
+ */
+#define H5F_SUPERBLOCK_MINIMAL_VARLEN_SIZE 7
+
/* Macros for computing variable-size superblock size */
#define H5F_SUPERBLOCK_VARLEN_SIZE_COMMON \
(2 /* freespace, and root group versions */ \
@@ -113,20 +128,38 @@
+ (sizeof_addr) /* EOF address */ \
+ (sizeof_addr) /* root group object header address */ \
+ H5F_SIZEOF_CHKSUM) /* superblock checksum (keep this last) */
-#define H5F_SUPERBLOCK_VARLEN_SIZE(v, f) ( \
- (v == 0 ? H5F_SUPERBLOCK_VARLEN_SIZE_V0(H5F_SIZEOF_ADDR(f), H5F_SIZEOF_SIZE(f)) : 0) \
- + (v == 1 ? H5F_SUPERBLOCK_VARLEN_SIZE_V1(H5F_SIZEOF_ADDR(f), H5F_SIZEOF_SIZE(f)) : 0) \
- + (v == 2 ? H5F_SUPERBLOCK_VARLEN_SIZE_V2(H5F_SIZEOF_ADDR(f)) : 0))
+#define H5F_SUPERBLOCK_VARLEN_SIZE(v, sizeof_addr, sizeof_size) ( \
+ (v == 0 ? H5F_SUPERBLOCK_VARLEN_SIZE_V0(sizeof_addr, sizeof_size) : 0) \
+ + (v == 1 ? H5F_SUPERBLOCK_VARLEN_SIZE_V1(sizeof_addr, sizeof_size) : 0) \
+ + (v == 2 ? H5F_SUPERBLOCK_VARLEN_SIZE_V2(sizeof_addr) : 0))
/* Total size of superblock, depends on superblock version */
-#define H5F_SUPERBLOCK_SIZE(v, f) ( H5F_SUPERBLOCK_FIXED_SIZE \
- + H5F_SUPERBLOCK_VARLEN_SIZE(v, f))
+#define H5F_SUPERBLOCK_SIZE(s) ( H5F_SUPERBLOCK_FIXED_SIZE \
+ + H5F_SUPERBLOCK_VARLEN_SIZE((s)->super_vers, (s)->sizeof_addr, (s)->sizeof_size))
/* Forward declaration external file cache struct used below (defined in
* H5Fefc.c) */
typedef struct H5F_efc_t H5F_efc_t;
+/* Structure for passing 'user data' to superblock cache callbacks */
+typedef struct H5F_superblock_cache_ud_t {
+/* IN: */
+ H5F_t *f; /* Pointer to file */
+ hbool_t ignore_drvrinfo; /* Indicate if the driver info should be ignored */
+/* OUT: */
+ unsigned sym_leaf_k; /* Symbol table leaf node's 'K' value */
+ unsigned btree_k[H5B_NUM_BTREE_ID]; /* B-tree key values for each type */
+ haddr_t stored_eof; /* End-of-file in file */
+ hbool_t drvrinfo_removed; /* Indicate if the driver info was removed */
+} H5F_superblock_cache_ud_t;
+
+/* Structure for passing 'user data' to driver info block cache callbacks */
+typedef struct H5F_drvrinfo_cache_ud_t {
+ H5F_t *f; /* Pointer to file */
+ haddr_t driver_addr; /* address of driver info block */
+} H5F_drvrinfo_cache_ud_t;
+
/* Structure for metadata & "small [raw] data" block aggregation fields */
struct H5F_blk_aggr_t {
unsigned long feature_flag; /* Feature flag type */
@@ -176,6 +209,8 @@ typedef struct H5F_mtab_t {
typedef struct H5F_super_t {
H5AC_info_t cache_info; /* Cache entry information structure */
unsigned super_vers; /* Superblock version */
+ uint8_t sizeof_addr; /* Size of addresses in file */
+ uint8_t sizeof_size; /* Size of offsets in file */
uint8_t status_flags; /* File status flags */
unsigned sym_leaf_k; /* Size of leaves in symbol tables */
unsigned btree_k[H5B_NUM_BTREE_ID]; /* B-tree key values for each type */
@@ -197,6 +232,13 @@ typedef struct H5F_super_t {
struct H5F_file_t {
H5FD_t *lf; /* Lower level file handle for I/O */
H5F_super_t *sblock; /* Pointer to (pinned) superblock for file */
+ H5O_drvinfo_t *drvinfo; /* Pointer to the (pinned) driver info
+ * cache entry. This field is only defined
+ * for older versions of the super block,
+ * and then only when a driver information
+ * block is present. At all other times
+ * it should be NULL.
+ */
unsigned nrefs; /* Ref count for times file is opened */
unsigned flags; /* Access Permissions for file */
H5F_mtab_t mtab; /* File mount table */
@@ -283,6 +325,7 @@ H5FL_EXTERN(H5F_t);
H5FL_EXTERN(H5F_file_t);
H5_DLLVAR const H5AC_class_t H5AC_SUPERBLOCK[1];
+H5_DLLVAR const H5AC_class_t H5AC_DRVRINFO[1];
/******************************/
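The H5F_SUPERBLOCK_MINIMAL_VARLEN_SIZE constant introduced in H5Fpkg.h above supports a speculative read: get_load_size first asks for only the fixed prefix plus this minimal variable-length piece, deserialize decodes the version and the address/length sizes from that prefix, and image_len then reports the true superblock size so the cache can re-read the complete image. A self-contained sketch of the two-pass idea (the length formula and constants are placeholders, not the real H5F_SUPERBLOCK_VARLEN_SIZE macro):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative constants only -- the real values come from H5Fpkg.h */
#define FIXED_SIZE   9u   /* cf. H5F_SUPERBLOCK_FIXED_SIZE */
#define MIN_VARLEN   7u   /* cf. H5F_SUPERBLOCK_MINIMAL_VARLEN_SIZE */

/* Fields a deserialize pass would recover from the speculative prefix */
typedef struct {
    uint8_t version;       /* superblock version */
    uint8_t sizeof_addr;   /* size of addresses in the file */
    uint8_t sizeof_size;   /* size of lengths in the file */
} prefix_t;

/* Pass 1 (get_load_size): read a prefix big enough for every version */
static size_t initial_load_size(void)
{
    return FIXED_SIZE + MIN_VARLEN;
}

/* Pass 2 (image_len): once the prefix is decoded, report the real size so
 * the cache can re-read the complete image. The formula is a placeholder;
 * the real macro selects a per-version formula based on prefix->version. */
static size_t full_image_len(const prefix_t *prefix)
{
    return FIXED_SIZE + 8u
        + 4u * (size_t)prefix->sizeof_addr
        + (size_t)prefix->sizeof_size;
}

int main(void)
{
    prefix_t decoded = { 0, 8, 8 };   /* as if deserialize had run on the prefix */

    printf("speculative read: %zu bytes, full image: %zu bytes\n",
           initial_load_size(), full_image_len(&decoded));
    return 0;
}

This is also why H5F__super_read() now bumps the EOA to FIXED_SIZE + MINIMAL_VARLEN_SIZE before protecting the superblock: the first, speculative read must stay within the allocated address space.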
diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c
index 166247a..e83330d 100644
--- a/src/H5Fsuper.c
+++ b/src/H5Fsuper.c
@@ -258,14 +258,21 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id)
{
H5P_genplist_t *dxpl; /* DXPL object */
H5F_super_t * sblock = NULL; /* Superblock structure */
+ H5F_superblock_cache_ud_t udata; /* User data for cache callbacks */
+ H5P_genplist_t *c_plist; /* File creation property list */
unsigned sblock_flags = H5AC__NO_FLAGS_SET; /* flags used in superblock unprotect call */
haddr_t super_addr; /* Absolute address of superblock */
- H5AC_protect_t rw; /* Read/write permissions for file */
- hbool_t dirtied = FALSE; /* Bool for sblock protect call */
+ haddr_t eof; /* End of file address */
+ unsigned rw_flags; /* Read/write permissions for file */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE_TAG(dxpl_id, H5AC__SUPERBLOCK_TAG, FAIL)
+ /* initialize the drvinfo to NULL -- we will overwrite this if there
+ * is a driver information block
+ */
+ f->shared->drvinfo = NULL;
+
/* Get the DXPL plist object for DXPL ID */
if(NULL == (dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
@@ -284,23 +291,358 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id)
} /* end if */
/* Determine file intent for superblock protect */
- if(H5F_INTENT(f) & H5F_ACC_RDWR)
- rw = H5AC_WRITE;
- else
- rw = H5AC_READ;
+
+ /* Must tell cache at protect time that the super block is to be
+ * flushed last (and collectively in the parallel case).
+ */
+ rw_flags = H5AC__FLUSH_LAST_FLAG;
+#ifdef H5_HAVE_PARALLEL
+ rw_flags |= H5C__FLUSH_COLLECTIVELY_FLAG;
+#endif /* H5_HAVE_PARALLEL */
+ if(!(H5F_INTENT(f) & H5F_ACC_RDWR))
+ rw_flags |= H5AC__READ_ONLY_FLAG;
+
+ /* Get the shared file creation property list */
+ if(NULL == (c_plist = (H5P_genplist_t *)H5I_object(f->shared->fcpl_id)))
+ HGOTO_ERROR(H5E_FILE, H5E_BADTYPE, FAIL, "can't get property list")
+
+ /* Make certain we can read the fixed-size portion of the superblock */
+ if(H5F__set_eoa(f, H5FD_MEM_SUPER,
+ H5F_SUPERBLOCK_FIXED_SIZE + H5F_SUPERBLOCK_MINIMAL_VARLEN_SIZE) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "set end of space allocation request failed")
+
+ /* Set up the user data for cache callbacks */
+ udata.f = f;
+ udata.ignore_drvrinfo = H5F_HAS_FEATURE(f, H5FD_FEAT_IGNORE_DRVRINFO);
+ udata.sym_leaf_k = 0;
+ if(H5P_get(c_plist, H5F_CRT_BTREE_RANK_NAME, udata.btree_k) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get rank for btree internal nodes")
+ udata.stored_eof = HADDR_UNDEF;
+ udata.drvrinfo_removed = FALSE;
/* Look up the superblock */
- if(NULL == (sblock = (H5F_super_t *)H5AC_protect(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, &dirtied, rw)))
+ if(NULL == (sblock = (H5F_super_t *)H5AC_protect(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, &udata, rw_flags)))
HGOTO_ERROR(H5E_FILE, H5E_CANTPROTECT, FAIL, "unable to load superblock")
- /* Mark the superblock dirty if it was modified during loading or VFD indicated to do so */
- if((H5AC_WRITE == rw) && (dirtied || H5F_HAS_FEATURE(f, H5FD_FEAT_DIRTY_SBLK_LOAD)))
- sblock_flags |= H5AC__DIRTIED_FLAG;
-
/* Pin the superblock in the cache */
if(H5AC_pin_protected_entry(sblock) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTPIN, FAIL, "unable to pin superblock")
+ /* Mark the superblock dirty if it was modified during loading */
+ if(((rw_flags & H5AC__READ_ONLY_FLAG) == 0) && udata.ignore_drvrinfo && udata.drvrinfo_removed) {
+ HDassert(sblock->super_vers < HDF5_SUPERBLOCK_VERSION_2);
+ sblock_flags |= H5AC__DIRTIED_FLAG;
+ } /* end if */
+
+ /* The superblock must be flushed last (and collectively in parallel) */
+ sblock_flags |= H5AC__FLUSH_LAST_FLAG;
+#ifdef H5_HAVE_PARALLEL
+ sblock_flags |= H5AC__FLUSH_COLLECTIVELY_FLAG;
+#endif /* H5_HAVE_PARALLEL */
+
+ /* Check if superblock address is different from base address and adjust
+ * base address and "end of address" address if so.
+ */
+ if(!H5F_addr_eq(super_addr, sblock->base_addr)) {
+ /* Check if the superblock moved earlier in the file */
+ if(H5F_addr_lt(super_addr, sblock->base_addr))
+ udata.stored_eof -= (sblock->base_addr - super_addr);
+ else
+ /* The superblock moved later in the file */
+ udata.stored_eof += (super_addr - sblock->base_addr);
+
+ /* Adjust base address for offsets of the HDF5 data in the file */
+ sblock->base_addr = super_addr;
+
+ /* Set the base address for the file in the VFD now */
+ if(H5F__set_base_addr(f, sblock->base_addr) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "failed to set base address for file driver")
+
+ /* Indicate that the superblock should be marked dirty */
+ if((rw_flags & H5AC__READ_ONLY_FLAG) == 0)
+ sblock_flags |= H5AC__DIRTIED_FLAG;
+ } /* end if */
+
+ /* Set information in the file's creation property list */
+ if(H5P_set(c_plist, H5F_CRT_SUPER_VERS_NAME, &sblock->super_vers) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set superblock version")
+ if(H5P_set(c_plist, H5F_CRT_ADDR_BYTE_NUM_NAME, &sblock->sizeof_addr) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set byte number in an address")
+ if(H5P_set(c_plist, H5F_CRT_OBJ_BYTE_NUM_NAME, &sblock->sizeof_size) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set byte number for object size")
+
+ /* Handle the B-tree 'K' values */
+ if(sblock->super_vers < HDF5_SUPERBLOCK_VERSION_2) {
+ /* Sanity check */
+ HDassert(udata.sym_leaf_k != 0);
+
+ /* Set the symbol table internal node 'K' value */
+ if(H5P_set(c_plist, H5F_CRT_SYM_LEAF_NAME, &udata.sym_leaf_k) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set rank for symbol table leaf nodes")
+ sblock->sym_leaf_k = udata.sym_leaf_k;
+
+ /* Set the B-tree internal node values, etc */
+ if(H5P_set(c_plist, H5F_CRT_BTREE_RANK_NAME, udata.btree_k) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set rank for btree internal nodes")
+ HDmemcpy(sblock->btree_k, udata.btree_k, sizeof(unsigned) * (size_t)H5B_NUM_BTREE_ID);
+ } /* end if */
+ else {
+ /* Get the (default) B-tree internal node values, etc */
+ /* (Note: these may be reset in a superblock extension) */
+ if(H5P_get(c_plist, H5F_CRT_BTREE_RANK_NAME, sblock->btree_k) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get rank for btree internal nodes")
+ if(H5P_get(c_plist, H5F_CRT_SYM_LEAF_NAME, &sblock->sym_leaf_k) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get rank for symbol table leaf nodes")
+ } /* end else */
+
+ /*
+ * The user-defined data is the area of the file before the base
+ * address.
+ */
+ if(H5P_set(c_plist, H5F_CRT_USER_BLOCK_NAME, &sblock->base_addr) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set userblock size")
+
+ /*
+ * Make sure that the data is not truncated. One case where this is
+ * possible is if the first file of a family of files was opened
+ * individually.
+ */
+ if(HADDR_UNDEF == (eof = H5FD_get_eof(f->shared->lf, H5FD_MEM_DEFAULT)))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to determine file size")
+
+ /* (Account for the stored EOA being absolute offset -QAK) */
+ if((eof + sblock->base_addr) < udata.stored_eof)
+ HGOTO_ERROR(H5E_FILE, H5E_TRUNCATED, FAIL, "truncated file: eof = %llu, sblock->base_addr = %llu, stored_eoa = %llu", (unsigned long long)eof, (unsigned long long)sblock->base_addr, (unsigned long long)udata.stored_eof)
+
+ /*
+ * Tell the file driver how much address space has already been
+ * allocated so that it knows how to allocate additional memory.
+ */
+
+ /* Decode the optional driver information block */
+ if(H5F_addr_defined(sblock->driver_addr)) {
+ H5O_drvinfo_t *drvinfo; /* Driver info */
+ H5F_drvrinfo_cache_ud_t drvrinfo_udata; /* User data for metadata callbacks */
+ unsigned drvinfo_flags = H5AC__NO_FLAGS_SET; /* Flags used in driver info block unprotect call */
+
+ /* Sanity check - driver info block should only be defined for
+ * superblock version < 2.
+ */
+ HDassert(sblock->super_vers < HDF5_SUPERBLOCK_VERSION_2);
+
+ /* Set up user data */
+ drvrinfo_udata.f = f;
+ drvrinfo_udata.driver_addr = sblock->driver_addr;
+
+ /* extend EOA so we can read at least the fixed sized
+ * portion of the driver info block
+ */
+ if(H5FD_set_eoa(f->shared->lf, H5FD_MEM_SUPER, sblock->driver_addr + H5F_DRVINFOBLOCK_HDR_SIZE) < 0) /* will extend eoa later if required */
+ HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, \
+ "set end of space allocation request failed")
+
+ /* Look up the driver info block */
+ if(NULL == (drvinfo = (H5O_drvinfo_t *)H5AC_protect(f, dxpl_id, H5AC_DRVRINFO, sblock->driver_addr, &drvrinfo_udata, rw_flags)))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTPROTECT, FAIL, "unable to load driver info block")
+
+ /* Loading the driver info block is enough to set up the right info */
+
+ /* Check if we need to rewrite the driver info block info */
+ if ( ( (rw_flags & H5AC__READ_ONLY_FLAG) == 0 ) &&
+ ( H5F_HAS_FEATURE(f, H5FD_FEAT_DIRTY_DRVRINFO_LOAD) ) ) {
+
+ drvinfo_flags |= H5AC__DIRTIED_FLAG;
+ } /* end if */
+
+ /* set the pin entry flag so that the driver information block
+ * cache entry will be pinned in the cache.
+ */
+ drvinfo_flags |= H5AC__PIN_ENTRY_FLAG;
+
+ /* Release the driver info block */
+ if(H5AC_unprotect(f, dxpl_id, H5AC_DRVRINFO, sblock->driver_addr, drvinfo, drvinfo_flags) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTUNPROTECT, FAIL, "unable to release driver info block")
+
+ /* save a pointer to the driver information cache entry */
+ f->shared->drvinfo = drvinfo;
+ } /* end if */
+
+ /* (Account for the stored EOA being absolute offset -NAF) */
+ if(H5F__set_eoa(f, H5FD_MEM_SUPER, udata.stored_eof - sblock->base_addr) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set end-of-address marker for file")
+
+ /* Decode the optional superblock extension info */
+ if(H5F_addr_defined(sblock->ext_addr)) {
+ H5O_loc_t ext_loc; /* "Object location" for superblock extension */
+ H5O_btreek_t btreek; /* v1 B-tree 'K' value message from superblock extension */
+ H5O_drvinfo_t drvinfo; /* Driver info message from superblock extension */
+ size_t u; /* Local index variable */
+ htri_t status; /* Status for message existing */
+
+ /* Sanity check - superblock extension should only be defined for
+ * superblock version >= 2.
+ */
+ HDassert(sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_2);
+
+ /* Check for superblock extension being located "outside" the stored
+ * 'eoa' value, which can occur with the split/multi VFD.
+ */
+ if(H5F_addr_gt(sblock->ext_addr, udata.stored_eof)) {
+ /* Set the 'eoa' for the object header memory type large enough
+ * to give some room for a reasonably sized superblock extension.
+ * (This is _rather_ a kludge -QAK)
+ */
+ if(H5F__set_eoa(f, H5FD_MEM_OHDR, (haddr_t)(sblock->ext_addr + 1024)) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set end-of-address marker for file")
+ } /* end if */
+
+ /* Open the superblock extension */
+ if(H5F_super_ext_open(f, sblock->ext_addr, &ext_loc) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTOPENOBJ, FAIL, "unable to open file's superblock extension")
+
+ /* Check for the extension having a 'driver info' message */
+ if((status = H5O_msg_exists(&ext_loc, H5O_DRVINFO_ID, dxpl_id)) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_EXISTS, FAIL, "unable to read object header")
+ if(status) {
+ /* Check for ignoring the driver info for this file */
+ if(!udata.ignore_drvrinfo) {
+
+ /* Retrieve the 'driver info' structure */
+ if(NULL == H5O_msg_read(&ext_loc, H5O_DRVINFO_ID, &drvinfo, dxpl_id))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "driver info message not present")
+
+ /* Validate and decode driver information */
+ if(H5FD_sb_load(f->shared->lf, drvinfo.name, drvinfo.buf) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, FAIL, "unable to decode driver information")
+
+ /* Reset driver info message */
+ H5O_msg_reset(H5O_DRVINFO_ID, &drvinfo);
+ } /* end if */
+ } /* end if */
+
+ /* Read in the shared OH message information if there is any */
+ if(H5SM_get_info(&ext_loc, c_plist, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to read SOHM table information")
+
+ /* Check for the extension having a 'v1 B-tree "K"' message */
+ if((status = H5O_msg_exists(&ext_loc, H5O_BTREEK_ID, dxpl_id)) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_EXISTS, FAIL, "unable to read object header")
+ if(status) {
+ /* Retrieve the 'v1 B-tree "K"' structure */
+ if(NULL == H5O_msg_read(&ext_loc, H5O_BTREEK_ID, &btreek, dxpl_id))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "v1 B-tree 'K' info message not present")
+
+ /* Set non-default v1 B-tree 'K' value info from file */
+ sblock->btree_k[H5B_CHUNK_ID] = btreek.btree_k[H5B_CHUNK_ID];
+ sblock->btree_k[H5B_SNODE_ID] = btreek.btree_k[H5B_SNODE_ID];
+ sblock->sym_leaf_k = btreek.sym_leaf_k;
+
+ /* Set non-default v1 B-tree 'K' values in the property list */
+ if(H5P_set(c_plist, H5F_CRT_BTREE_RANK_NAME, btreek.btree_k) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set rank for btree internal nodes")
+ if(H5P_set(c_plist, H5F_CRT_SYM_LEAF_NAME, &btreek.sym_leaf_k) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set rank for symbol table leaf nodes")
+ } /* end if */
+
+ /* Check for the extension having a 'free-space manager info' message */
+ if((status = H5O_msg_exists(&ext_loc, H5O_FSINFO_ID, dxpl_id)) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_EXISTS, FAIL, "unable to read object header")
+ if(status) {
+ H5O_fsinfo_t fsinfo; /* Free-space manager info message from superblock extension */
+
+ /* Retrieve the 'free-space manager info' structure */
+ if(NULL == H5O_msg_read(&ext_loc, H5O_FSINFO_ID, &fsinfo, dxpl_id))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get free-space manager info message")
+
+ /* Check for non-default info */
+ if(f->shared->fs_strategy != fsinfo.strategy) {
+ f->shared->fs_strategy = fsinfo.strategy;
+
+ /* Set non-default strategy in the property list */
+ if(H5P_set(c_plist, H5F_CRT_FILE_SPACE_STRATEGY_NAME, &fsinfo.strategy) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set file space strategy")
+ } /* end if */
+ if(f->shared->fs_threshold != fsinfo.threshold) {
+ f->shared->fs_threshold = fsinfo.threshold;
+
+ /* Set non-default threshold in the property list */
+ if(H5P_set(c_plist, H5F_CRT_FREE_SPACE_THRESHOLD_NAME, &fsinfo.threshold) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set file space strategy")
+ } /* end if */
+
+ /* Set free-space manager addresses */
+ f->shared->fs_addr[0] = HADDR_UNDEF;
+ for(u = 1; u < NELMTS(f->shared->fs_addr); u++)
+ f->shared->fs_addr[u] = fsinfo.fs_addr[u-1];
+ } /* end if */
+
+ /* Close superblock extension */
+ if(H5F_super_ext_close(f, &ext_loc, dxpl_id, FALSE) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTCLOSEOBJ, FAIL, "unable to close file's superblock extension")
+ } /* end if */
+
+ /* Update the driver info if the VFD indicated to do so */
+ /* (NOTE: only for later versions of the superblock; earlier versions are
+ * handled earlier in this routine.)
+ */
+ if(((rw_flags & H5AC__READ_ONLY_FLAG) == 0) &&
+ sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_2 &&
+ H5F_addr_defined(sblock->ext_addr)) {
+ /* Check for modifying the driver info when opening the file */
+ if(H5F_HAS_FEATURE(f, H5FD_FEAT_DIRTY_DRVRINFO_LOAD)) {
+ size_t driver_size; /* Size of driver info block (bytes) */
+
+ /* Check for driver info message */
+ H5_CHECKED_ASSIGN(driver_size, size_t, H5FD_sb_size(f->shared->lf), hsize_t);
+ if(driver_size > 0) {
+ H5O_drvinfo_t drvinfo; /* Driver info */
+ uint8_t dbuf[H5F_MAX_DRVINFOBLOCK_SIZE]; /* Driver info block encoding buffer */
+
+ /* Sanity check */
+ HDassert(driver_size <= H5F_MAX_DRVINFOBLOCK_SIZE);
+
+ /* Encode driver-specific data */
+ if(H5FD_sb_encode(f->shared->lf, drvinfo.name, dbuf) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to encode driver information")
+
+ /* Set the driver info information for the superblock extension */
+ drvinfo.len = driver_size;
+ drvinfo.buf = dbuf;
+
+ /* Write driver info information to the superblock extension */
+
+#if 1 /* bug fix test code -- tidy this up if all goes well */ /* JRM */
+ /* KLUGE ALERT!!
+ *
+ * H5F_super_ext_write_msg() expects f->shared->sblock to
+ * be set -- verify that it is NULL, and then set it.
+ * Set it back to NULL when we are done.
+ */
+ HDassert(f->shared->sblock == NULL);
+ f->shared->sblock = sblock;
+#endif /* JRM */
+
+ if(H5F_super_ext_write_msg(f, dxpl_id, &drvinfo, H5O_DRVINFO_ID, FALSE) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "error in writing message to superblock extension")
+
+#if 1 /* bug fix test code -- tidy this up if all goes well */ /* JRM */
+ f->shared->sblock = NULL;
+#endif /* JRM */
+
+ } /* end if */
+ } /* end if */
+ /* Check for eliminating the driver info block */
+ else if(H5F_HAS_FEATURE(f, H5FD_FEAT_IGNORE_DRVRINFO)) {
+ /* Remove the driver info message from the superblock extension */
+ if(H5F_super_ext_remove_msg(f, dxpl_id, H5O_DRVINFO_ID) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "error in removing message from superblock extension")
+
+ /* Check if the superblock extension was removed */
+ if(!H5F_addr_defined(sblock->ext_addr))
+ sblock_flags |= H5AC__DIRTIED_FLAG;
+ } /* end if */
+ } /* end if */
+
/* Set the pointer to the pinned superblock */
f->shared->sblock = sblock;
@@ -309,6 +651,32 @@ done:
if(sblock && H5AC_unprotect(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, sblock, sblock_flags) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTUNPROTECT, FAIL, "unable to close superblock")
+ /* If we have failed, make sure no entries are left in the
+ * metadata cache, so that it can be shut down and discarded.
+ */
+ if(ret_value < 0) {
+ /* Unpin and discard drvinfo cache entry */
+ if(f->shared->drvinfo) {
+ if(H5AC_unpin_entry(f->shared->drvinfo) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_CANTUNPIN, FAIL, "unable to unpin driver info")
+
+ /* Evict the driver info block from the cache */
+ if(H5AC_expunge_entry(f, dxpl_id, H5AC_DRVRINFO, sblock->driver_addr, H5AC__NO_FLAGS_SET) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_CANTEXPUNGE, FAIL, "unable to expunge driver info block")
+ } /* end if */
+
+ /* Unpin & discard superblock */
+ if(sblock) {
+ /* Unpin superblock in cache */
+ if(H5AC_unpin_entry(sblock) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_CANTUNPIN, FAIL, "unable to unpin superblock")
+
+ /* Evict the superblock from the cache */
+ if(H5AC_expunge_entry(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, H5AC__NO_FLAGS_SET) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_CANTEXPUNGE, FAIL, "unable to expunge superblock")
+ } /* end if */
+ } /* end if */
+
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
} /* end H5F__super_read() */
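The failure path above unpins and then expunges any cache entries that H5F__super_read() had pinned, so a failed open leaves the metadata cache empty and able to shut down. The standalone sketch below illustrates that unwind pattern only; the entry_t type and cache_* helpers are invented stand-ins, not the H5AC API.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical cache entry: pinned entries may not be evicted until unpinned. */
typedef struct { const char *name; int pinned; int cached; } entry_t;

static int cache_unpin(entry_t *e)   { e->pinned = 0; return 0; }
static int cache_expunge(entry_t *e) { if(e->pinned) return -1; e->cached = 0; return 0; }

/* Open-style routine: on failure, unpin then evict in reverse order of setup. */
static int open_with_cleanup(entry_t *drvinfo, entry_t *sblock, int fail)
{
    int ret_value = 0;

    /* ... imagine both entries were inserted & pinned by the code above ... */
    if(fail)
        ret_value = -1;                 /* simulated mid-open failure */

    if(ret_value < 0) {
        if(drvinfo->cached) {           /* unpin, then evict the driver info */
            cache_unpin(drvinfo);
            cache_expunge(drvinfo);
        }
        if(sblock->cached) {            /* unpin, then evict the superblock */
            cache_unpin(sblock);
            cache_expunge(sblock);
        }
    }
    return ret_value;
}

int main(void)
{
    entry_t drvinfo = { "drvinfo", 1, 1 }, sblock = { "superblock", 1, 1 };

    open_with_cleanup(&drvinfo, &sblock, 1 /* force failure */);
    printf("cached after failed open: drvinfo=%d sblock=%d\n",
           drvinfo.cached, sblock.cached);
    return EXIT_SUCCESS;
}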
@@ -334,6 +702,8 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
{
H5F_super_t *sblock = NULL; /* Superblock cache structure */
hbool_t sblock_in_cache = FALSE; /* Whether the superblock has been inserted into the metadata cache */
+ H5O_drvinfo_t *drvinfo = NULL; /* Driver info */
+ hbool_t drvinfo_in_cache = FALSE; /* Whether the driver info block has been inserted into the metadata cache */
H5P_genplist_t *plist; /* File creation property list */
hsize_t userblock_size; /* Size of userblock, in bytes */
hsize_t superblock_size; /* Size of superblock, in bytes */
@@ -427,11 +797,13 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
if(H5F__set_base_addr(f, sblock->base_addr) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "failed to set base address for file driver")
- /* Save a local copy of the superblock version number */
+ /* Save a local copy of the superblock version number, size of addresses & offsets */
sblock->super_vers = super_vers;
+ sblock->sizeof_addr = f->shared->sizeof_addr;
+ sblock->sizeof_size = f->shared->sizeof_size;
/* Compute the size of the superblock */
- superblock_size = (hsize_t)H5F_SUPERBLOCK_SIZE(super_vers, f);
+ superblock_size = (hsize_t)H5F_SUPERBLOCK_SIZE(sblock);
/* Compute the size of the driver information block */
H5_CHECKED_ASSIGN(driver_size, size_t, H5FD_sb_size(f->shared->lf), hsize_t);
@@ -446,10 +818,10 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
} /* end if */
/*
- * Allocate space for the userblock, superblock & driver info blocks.
- * We do it with one allocation request because the userblock and
- * superblock need to be at the beginning of the file and only the first
- * allocation request is required to return memory at format address zero.
+ * Allocate space for the superblock & driver info block.
+ * We do it with one allocation request because the superblock needs to be
+ * at the beginning of the file and only the first allocation request is
+ * required to return memory at format address zero.
*/
if(super_vers < HDF5_SUPERBLOCK_VERSION_2)
superblock_size += driver_size;
@@ -466,6 +838,9 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
/* Keep a copy of the superblock info */
f->shared->sblock = sblock;
+ /* Set the drvinfo field to NULL -- will overwrite this later if needed */
+ f->shared->drvinfo = NULL;
+
/*
* Determine if we will need a superblock extension
*/
@@ -572,6 +947,33 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to update free-space info header message")
} /* end if */
} /* end if */
+ else {
+ /* Check for creating an "old-style" driver info block */
+ if(driver_size > 0) {
+ /* Sanity check */
+ HDassert(H5F_addr_defined(sblock->driver_addr));
+
+ /* Allocate space for the driver info */
+ if(NULL == (drvinfo = (H5O_drvinfo_t *)H5MM_calloc(sizeof(H5O_drvinfo_t))))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTALLOC, FAIL, "memory allocation failed for driver info message")
+
+ /* Set up driver info message */
+ /* (NOTE: All the actual information (name & driver information) is
+ * based on the VFD info in the file handle and will be
+ * encoded by the VFD's 'encode' callback, so it doesn't
+ * need to be set here. -QAK, 7/20/2013)
+ */
+ H5_CHECKED_ASSIGN(drvinfo->len, size_t, H5FD_sb_size(f->shared->lf), hsize_t);
+
+ /* Insert driver info block into cache */
+ if(H5AC_insert_entry(f, dxpl_id, H5AC_DRVRINFO, sblock->driver_addr, drvinfo, H5AC__PIN_ENTRY_FLAG | H5AC__FLUSH_LAST_FLAG | H5AC__FLUSH_COLLECTIVELY_FLAG) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTINS, FAIL, "can't add driver info block to cache")
+ drvinfo_in_cache = TRUE;
+ f->shared->drvinfo = drvinfo;
+ } /* end if */
+ else
+ HDassert(!H5F_addr_defined(sblock->driver_addr));
+ } /* end if */
done:
/* Close superblock extension, if it was created */
@@ -580,6 +982,23 @@ done:
/* Cleanup on failure */
if(ret_value < 0) {
+ /* Check if the driver info block has been allocated yet */
+ if(drvinfo) {
+ /* Check if we've cached it already */
+ if(drvinfo_in_cache) {
+ /* Unpin drvinfo in cache */
+ if(H5AC_unpin_entry(drvinfo) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_CANTUNPIN, FAIL, "unable to unpin driver info")
+
+ /* Evict the driver info block from the cache */
+ if(H5AC_expunge_entry(f, dxpl_id, H5AC_DRVRINFO, sblock->driver_addr, H5AC__NO_FLAGS_SET) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_CANTEXPUNGE, FAIL, "unable to expunge driver info block")
+ } /* end if */
+ else
+ /* Free driver info block */
+ H5MM_xfree(drvinfo);
+ } /* end if */
+
/* Check if the superblock has been allocated yet */
if(sblock) {
/* Check if we've cached it already */
@@ -635,6 +1054,14 @@ H5F_super_dirty(H5F_t *f)
if(H5AC_mark_entry_dirty(f->shared->sblock) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, FAIL, "unable to mark superblock as dirty")
+ /* If the driver information block exists, mark it dirty as well,
+ * so that the change in the EOA will be reflected there too, if
+ * appropriate.
+ */
+ if ( f->shared->drvinfo )
+ if(H5AC_mark_entry_dirty(f->shared->drvinfo) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, FAIL, "unable to mark drvinfo as dirty")
+
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5F_super_dirty() */
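H5F_super_dirty() now propagates dirtiness from the superblock to the pinned driver info entry so an EOA change is re-encoded in both places. A toy sketch of that propagation follows; the entry_t/file_t types and mark_dirty helper are invented for illustration and are not the H5AC interface.

#include <stddef.h>
#include <stdio.h>

typedef struct { const char *name; int dirty; } entry_t;

typedef struct {
    entry_t  sblock;    /* always present once the file is open */
    entry_t *drvinfo;   /* optional: NULL when no driver info block exists */
} file_t;

static void mark_dirty(entry_t *e) { e->dirty = 1; }

/* Called whenever the EOA changes: dirty the superblock and, if present,
 * the driver info block, so both get rewritten with the new EOA. */
static void file_super_dirty(file_t *f)
{
    mark_dirty(&f->sblock);
    if(f->drvinfo != NULL)
        mark_dirty(f->drvinfo);
}

int main(void)
{
    entry_t drv = { "drvinfo", 0 };
    file_t f = { { "superblock", 0 }, &drv };

    file_super_dirty(&f);
    printf("sblock dirty=%d, drvinfo dirty=%d\n", f.sblock.dirty, drv.dirty);
    return 0;
}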
@@ -698,7 +1125,7 @@ H5F__super_size(H5F_t *f, hid_t dxpl_id, hsize_t *super_size, hsize_t *super_ext
/* Set the superblock size */
if(super_size)
- *super_size = (hsize_t)H5F_SUPERBLOCK_SIZE(f->shared->sblock->super_vers, f);
+ *super_size = (hsize_t)H5F_SUPERBLOCK_SIZE(f->shared->sblock);
/* Set the superblock extension size */
if(super_ext_size) {
diff --git a/src/H5Fsuper_cache.c b/src/H5Fsuper_cache.c
index ded845d..cf14770 100644
--- a/src/H5Fsuper_cache.c
+++ b/src/H5Fsuper_cache.c
@@ -51,9 +51,6 @@
/* Local Macros */
/****************/
-/* Maximum size of super-block buffers */
-#define H5F_MAX_SUPERBLOCK_SIZE 134
-
/******************/
/* Local Typedefs */
@@ -70,28 +67,70 @@
/********************/
/* Metadata cache (H5AC) callbacks */
-static H5F_super_t *H5F_sblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5F_sblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5F_super_t *sblock);
-static herr_t H5F_sblock_dest(H5F_t *f, H5F_super_t * sblock);
-static herr_t H5F_sblock_clear(H5F_t *f, H5F_super_t *sblock, hbool_t destroy);
-static herr_t H5F_sblock_size(const H5F_t *f, const H5F_super_t *sblock, size_t *size_ptr);
+static herr_t H5F__cache_superblock_get_load_size(const void *udata, size_t *image_len);
+static void *H5F__cache_superblock_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5F__cache_superblock_image_len(const void *thing,
+ size_t *image_len, hbool_t *compressed_ptr,
+ size_t *compressed_image_len_ptr);
+static herr_t H5F__cache_superblock_pre_serialize(const H5F_t *f,
+ hid_t dxpl_id, void *thing, haddr_t addr, size_t len,
+ size_t compressed_len, haddr_t *new_addr, size_t *new_len,
+ size_t *new_compressed_len, unsigned *flags);
+static herr_t H5F__cache_superblock_serialize(const H5F_t *f, void *image, size_t len,
+ void *thing);
+static herr_t H5F__cache_superblock_free_icr(void *thing);
+
+static herr_t H5F__cache_drvrinfo_get_load_size(const void *udata, size_t *image_len);
+static void *H5F__cache_drvrinfo_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5F__cache_drvrinfo_image_len(const void *thing,
+ size_t *image_len, hbool_t *compressed_ptr,
+ size_t *compressed_image_len_ptr);
+static herr_t H5F__cache_drvrinfo_serialize(const H5F_t *f, void *image, size_t len,
+ void *thing);
+static herr_t H5F__cache_drvrinfo_free_icr(void *thing);
/*********************/
/* Package Variables */
/*********************/
-/* H5F inherits cache-like properties from H5AC */
+/* H5F superblock inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_SUPERBLOCK[1] = {{
- H5AC_SUPERBLOCK_ID,
- (H5AC_load_func_t)H5F_sblock_load,
- (H5AC_flush_func_t)H5F_sblock_flush,
- (H5AC_dest_func_t)H5F_sblock_dest,
- (H5AC_clear_func_t)H5F_sblock_clear,
- (H5AC_notify_func_t)NULL,
- (H5AC_size_func_t)H5F_sblock_size,
+ H5AC_SUPERBLOCK_ID, /* Metadata client ID */
+ "Superblock", /* Metadata client name (for debugging) */
+ H5FD_MEM_SUPER, /* File space memory type for client */
+ H5AC__CLASS_SPECULATIVE_LOAD_FLAG, /* Client class behavior flags */
+ H5F__cache_superblock_get_load_size,/* 'get_load_size' callback */
+ H5F__cache_superblock_deserialize, /* 'deserialize' callback */
+ H5F__cache_superblock_image_len, /* 'image_len' callback */
+ H5F__cache_superblock_pre_serialize,/* 'pre_serialize' callback */
+ H5F__cache_superblock_serialize, /* 'serialize' callback */
+ NULL, /* 'notify' callback */
+ H5F__cache_superblock_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
+}};
+
+/* H5F driver info block inherits cache-like properties from H5AC */
+const H5AC_class_t H5AC_DRVRINFO[1] = {{
+ H5AC_DRVRINFO_ID, /* Metadata client ID */
+ "Driver info block", /* Metadata client name (for debugging) */
+ H5FD_MEM_SUPER, /* File space memory type for client */
+ H5AC__CLASS_SPECULATIVE_LOAD_FLAG, /* Client class behavior flags */
+ H5F__cache_drvrinfo_get_load_size, /* 'get_load_size' callback */
+ H5F__cache_drvrinfo_deserialize, /* 'deserialize' callback */
+ H5F__cache_drvrinfo_image_len, /* 'image_len' callback */
+ NULL, /* 'pre_serialize' callback */
+ H5F__cache_drvrinfo_serialize, /* 'serialize' callback */
+ NULL, /* 'notify' callback */
+ H5F__cache_drvrinfo_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
+
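Both class tables above follow the same shape: a const struct of function pointers, one instance per metadata client, and the cache drives every client only through those hooks. The sketch below shows that vtable pattern with a made-up cache_class_t and a trivial "counter" client; the struct layout, field names, and callback signatures here are illustrative only and do not match H5AC_class_t.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical client class: the cache touches clients only through these hooks. */
typedef struct {
    const char *name;
    int   (*get_load_size)(const void *udata, size_t *image_len);
    void *(*deserialize)(const void *image, size_t len, void *udata);
    int   (*serialize)(void *image, size_t len, void *thing);
    void  (*free_icr)(void *thing);
} cache_class_t;

/* A trivial client whose on-disk image is a single little-endian 32-bit counter. */
typedef struct { unsigned counter; } counter_t;

static int counter_get_load_size(const void *udata, size_t *image_len)
{ (void)udata; *image_len = 4; return 0; }

static void *counter_deserialize(const void *image, size_t len, void *udata)
{
    const unsigned char *p = image;
    counter_t *c;
    (void)udata;
    if(len < 4 || NULL == (c = malloc(sizeof(*c))))
        return NULL;
    c->counter = (unsigned)p[0] | ((unsigned)p[1] << 8) |
                 ((unsigned)p[2] << 16) | ((unsigned)p[3] << 24);
    return c;
}

static int counter_serialize(void *image, size_t len, void *thing)
{
    unsigned char *p = image;
    counter_t *c = thing;
    if(len < 4)
        return -1;
    p[0] = (unsigned char)(c->counter & 0xff);
    p[1] = (unsigned char)((c->counter >> 8) & 0xff);
    p[2] = (unsigned char)((c->counter >> 16) & 0xff);
    p[3] = (unsigned char)((c->counter >> 24) & 0xff);
    return 0;
}

static void counter_free_icr(void *thing) { free(thing); }

/* One const table per client, analogous to the tables above. */
static const cache_class_t COUNTER_CLASS = {
    "Counter", counter_get_load_size, counter_deserialize,
    counter_serialize, counter_free_icr
};

int main(void)
{
    unsigned char image[4] = { 0x2a, 0, 0, 0 };
    size_t len = 0;
    counter_t *c;

    COUNTER_CLASS.get_load_size(NULL, &len);
    c = COUNTER_CLASS.deserialize(image, len, NULL);
    printf("%s decoded counter = %u\n", COUNTER_CLASS.name, c->counter);
    COUNTER_CLASS.free_icr(c);
    return 0;
}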
/*****************************/
/* Library Private Variables */
/*****************************/
@@ -107,546 +146,470 @@ H5FL_EXTERN(H5F_super_t);
/*-------------------------------------------------------------------------
- * Function: H5F_sblock_load
+ * Function: H5F__cache_superblock_get_load_size
*
- * Purpose: Loads the superblock from the file, and deserializes
- * its information into the H5F_super_t structure.
+ * Purpose: Compute the size of the data structure on disk.
*
- * Return: Success: SUCCEED
- * Failure: NULL
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Mike McGreevy
- * mamcgree@hdfgroup.org
- * April 8, 2009
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * July 17, 2013
*
*-------------------------------------------------------------------------
*/
-static H5F_super_t *
-H5F_sblock_load(H5F_t *f, hid_t dxpl_id, haddr_t H5_ATTR_UNUSED addr, void *_udata)
+static herr_t
+H5F__cache_superblock_get_load_size(const void H5_ATTR_UNUSED *udata, size_t *image_len)
{
- H5F_super_t *sblock = NULL; /* File's superblock */
- haddr_t base_addr = HADDR_UNDEF; /* Base address of file */
- uint8_t sbuf[H5F_MAX_SUPERBLOCK_SIZE]; /* Buffer for superblock */
- H5P_genplist_t *dxpl; /* DXPL object */
- H5P_genplist_t *c_plist; /* File creation property list */
- H5F_file_t *shared; /* shared part of `file' */
- H5FD_t *lf; /* file driver part of `shared' */
- haddr_t stored_eof; /* stored end-of-file address in file */
- haddr_t eof; /* end of file address */
- uint8_t sizeof_addr; /* Size of offsets in the file (in bytes) */
- uint8_t sizeof_size; /* Size of lengths in the file (in bytes) */
- const size_t fixed_size = H5F_SUPERBLOCK_FIXED_SIZE; /*fixed sizeof superblock */
- size_t variable_size; /*variable sizeof superblock */
- uint8_t *p; /* Temporary pointer into encoding buffer */
- unsigned super_vers; /* Superblock version */
- hbool_t *dirtied = (hbool_t *)_udata; /* Set up dirtied out value */
- H5F_super_t *ret_value; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC_NOERR
/* Check arguments */
- HDassert(f);
- HDassert(H5F_addr_eq(addr, 0));
- HDassert(dirtied);
+ HDassert(image_len);
- /* Short cuts */
- shared = f->shared;
- lf = shared->lf;
+ /* Set the initial image length: fixed portion + minimal variable-length portion */
+ *image_len = H5F_SUPERBLOCK_FIXED_SIZE + /* Fixed size of superblock */
+ H5F_SUPERBLOCK_MINIMAL_VARLEN_SIZE;
- /* Get the shared file creation property list */
- if(NULL == (c_plist = (H5P_genplist_t *)H5I_object(shared->fcpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "can't get property list")
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5F__cache_superblock_get_load_size() */
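Because the class above sets H5AC__CLASS_SPECULATIVE_LOAD_FLAG, the first read uses only the fixed-plus-minimal length returned here; if the deserialize callback finds the real image is larger, the cache reads again with the corrected length. The standalone sketch below shows that read-then-retry idea against an in-memory "file"; the record layout and helper names are invented and have nothing to do with the H5C machinery itself.

#include <stdio.h>
#include <string.h>

#define FIXED_SIZE    4u   /* bytes always present: 2-byte magic + 2-byte payload length */
#define MINIMAL_GUESS 4u   /* speculative first guess for the variable part */

/* Hypothetical record: [2-byte magic][2-byte payload length][payload...] */
static size_t record_full_size(const unsigned char *prefix)
{
    return FIXED_SIZE + ((size_t)prefix[2] | ((size_t)prefix[3] << 8));
}

/* Speculatively "load" a record from file[]: read a small guess first,
 * then retry with the exact size once the prefix has been decoded. */
static size_t speculative_load(const unsigned char *file, size_t file_len,
                               unsigned char *out, size_t out_cap)
{
    size_t guess = FIXED_SIZE + MINIMAL_GUESS;
    size_t need;

    if(file_len < FIXED_SIZE || out_cap < guess)
        return 0;
    memcpy(out, file, guess < file_len ? guess : file_len);   /* first, short read */

    need = record_full_size(out);                             /* now the real size is known */
    if(need > guess) {
        if(need > file_len || need > out_cap)
            return 0;
        memcpy(out, file, need);                              /* retry with full length */
    }
    return need;
}

int main(void)
{
    /* magic 0xF11E, payload length 6, then 6 payload bytes */
    const unsigned char file[] = { 0x1e, 0xf1, 6, 0, 'h', 'd', 'f', '5', '!', 0 };
    unsigned char buf[32];
    size_t n = speculative_load(file, sizeof(file), buf, sizeof(buf));

    printf("loaded %zu bytes\n", n);
    return 0;
}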
- /* Get the base address for the file in the VFD */
- if(HADDR_UNDEF == (base_addr = H5FD_get_base_addr(lf)))
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "failed to get base address for file driver")
+
+/*-------------------------------------------------------------------------
+ * Function: H5F__cache_superblock_deserialize
+ *
+ * Purpose: Loads an object from the disk.
+ *
+ * Return: Success: Pointer to new object
+ * Failure: NULL
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * July 18 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+static void *
+H5F__cache_superblock_deserialize(const void *_image, size_t len, void *_udata,
+ hbool_t H5_ATTR_UNUSED *dirty)
+{
+ H5F_super_t *sblock = NULL; /* File's superblock */
+ H5F_superblock_cache_ud_t *udata = (H5F_superblock_cache_ud_t *)_udata; /* User data */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ size_t variable_size; /* Variable size of superblock */
+ unsigned super_vers; /* Superblock version */
+ uint8_t sizeof_addr; /* Size of offsets in the file (in bytes) */
+ uint8_t sizeof_size; /* Size of lengths in the file (in bytes) */
+ H5F_super_t *ret_value; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Check arguments */
+ HDassert(image);
+ HDassert(udata);
+ HDassert(udata->f);
/* Allocate space for the superblock */
if(NULL == (sblock = H5FL_CALLOC(H5F_super_t)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
- /* The superblock must be flushed last (and collectively in parallel) */
- sblock->cache_info.flush_me_last = TRUE;
-#ifdef H5_HAVE_PARALLEL
- sblock->cache_info.flush_me_collectively = TRUE;
-#endif
-
- /* Get the DXPL plist object for DXPL ID */
- if(NULL == (dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "can't get property list")
-
- /* Read fixed-size portion of the superblock */
- p = sbuf;
- H5_CHECK_OVERFLOW(fixed_size, size_t, haddr_t);
- if(H5FD_set_eoa(lf, H5FD_MEM_SUPER, (haddr_t)fixed_size) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "set end of space allocation request failed")
- if(H5FD_read(lf, dxpl, H5FD_MEM_SUPER, (haddr_t)0, fixed_size, p) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_READERROR, NULL, "unable to read superblock")
-
/* Skip over signature (already checked when locating the superblock) */
- p += H5F_SIGNATURE_LEN;
+ image += H5F_SIGNATURE_LEN;
/* Superblock version */
- super_vers = *p++;
+ super_vers = *image++;
if(super_vers > HDF5_SUPERBLOCK_VERSION_LATEST)
HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad superblock version number")
- if(H5P_set(c_plist, H5F_CRT_SUPER_VERS_NAME, &super_vers) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, NULL, "unable to set superblock version")
/* Record the superblock version */
sblock->super_vers = super_vers;
/* Sanity check */
- HDassert(((size_t)(p - sbuf)) == fixed_size);
+ HDassert(((size_t)(image - (const uint8_t *)_image)) == H5F_SUPERBLOCK_FIXED_SIZE);
+ HDassert(len >= H5F_SUPERBLOCK_FIXED_SIZE + 6);
+
+ /* Determine the size of addresses & size of offsets, for computing the
+ * variable-sized portion of the superblock.
+ */
+ if(super_vers < HDF5_SUPERBLOCK_VERSION_2) {
+ sizeof_addr = image[4];
+ sizeof_size = image[5];
+ } /* end if */
+ else {
+ sizeof_addr = image[0];
+ sizeof_size = image[1];
+ } /* end else */
+ if(sizeof_addr != 2 && sizeof_addr != 4 &&
+ sizeof_addr != 8 && sizeof_addr != 16 && sizeof_addr != 32)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number in an address")
+ if(sizeof_size != 2 && sizeof_size != 4 &&
+ sizeof_size != 8 && sizeof_size != 16 && sizeof_size != 32)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number for object size")
+ sblock->sizeof_addr = sizeof_addr;
+ sblock->sizeof_size = sizeof_size;
/* Determine the size of the variable-length part of the superblock */
- variable_size = (size_t)H5F_SUPERBLOCK_VARLEN_SIZE(super_vers, f);
+ variable_size = (size_t)H5F_SUPERBLOCK_VARLEN_SIZE(super_vers, sizeof_addr, sizeof_size);
HDassert(variable_size > 0);
- HDassert(fixed_size + variable_size <= sizeof(sbuf));
- /* Read in variable-sized portion of superblock */
- if(H5FD_set_eoa(lf, H5FD_MEM_SUPER, (haddr_t)(fixed_size + variable_size)) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "set end of space allocation request failed")
- if(H5FD_read(lf, dxpl, H5FD_MEM_SUPER, (haddr_t)fixed_size, variable_size, p) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to read superblock")
+ /* Handle metadata cache retry for variable-sized portion of the superblock */
+ if(len != (H5F_SUPERBLOCK_FIXED_SIZE + variable_size)) {
+ /* Sanity check */
+ HDassert(len == (H5F_SUPERBLOCK_FIXED_SIZE + H5F_SUPERBLOCK_MINIMAL_VARLEN_SIZE));
- /* Check for older version of superblock format */
- if(super_vers < HDF5_SUPERBLOCK_VERSION_2) {
- uint32_t status_flags; /* File status flags */
- unsigned btree_k[H5B_NUM_BTREE_ID]; /* B-tree internal node 'K' values */
- unsigned sym_leaf_k; /* Symbol table leaf node's 'K' value */
-
- /* Freespace version (hard-wired) */
- if(HDF5_FREESPACE_VERSION != *p++)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad free space version number")
-
- /* Root group version number (hard-wired) */
- if(HDF5_OBJECTDIR_VERSION != *p++)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad object directory version number")
-
- /* Skip over reserved byte */
- p++;
-
- /* Shared header version number (hard-wired) */
- if(HDF5_SHAREDHEADER_VERSION != *p++)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad shared-header format version number")
-
- /* Size of file addresses */
- sizeof_addr = *p++;
- if(sizeof_addr != 2 && sizeof_addr != 4 &&
- sizeof_addr != 8 && sizeof_addr != 16 && sizeof_addr != 32)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number in an address")
- if(H5P_set(c_plist, H5F_CRT_ADDR_BYTE_NUM_NAME, &sizeof_addr) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, NULL, "unable to set byte number in an address")
- shared->sizeof_addr = sizeof_addr; /* Keep a local copy also */
-
- /* Size of file sizes */
- sizeof_size = *p++;
- if(sizeof_size != 2 && sizeof_size != 4 &&
- sizeof_size != 8 && sizeof_size != 16 && sizeof_size != 32)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number for object size")
- if(H5P_set(c_plist, H5F_CRT_OBJ_BYTE_NUM_NAME, &sizeof_size) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, NULL, "unable to set byte number for object size")
- shared->sizeof_size = sizeof_size; /* Keep a local copy also */
-
- /* Skip over reserved byte */
- p++;
-
- /* Various B-tree sizes */
- UINT16DECODE(p, sym_leaf_k);
- if(sym_leaf_k == 0)
- HGOTO_ERROR(H5E_FILE, H5E_BADRANGE, NULL, "bad symbol table leaf node 1/2 rank")
- if(H5P_set(c_plist, H5F_CRT_SYM_LEAF_NAME, &sym_leaf_k) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, NULL, "unable to set rank for symbol table leaf nodes")
- sblock->sym_leaf_k = sym_leaf_k; /* Keep a local copy also */
-
- /* Need 'get' call to set other array values */
- if(H5P_get(c_plist, H5F_CRT_BTREE_RANK_NAME, btree_k) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "unable to get rank for btree internal nodes")
- UINT16DECODE(p, btree_k[H5B_SNODE_ID]);
- if(btree_k[H5B_SNODE_ID] == 0)
- HGOTO_ERROR(H5E_FILE, H5E_BADRANGE, NULL, "bad 1/2 rank for btree internal nodes")
- /*
- * Delay setting the value in the property list until we've checked
- * for the indexed storage B-tree internal 'K' value later.
- */
-
- /* File status flags (not really used yet) */
- UINT32DECODE(p, status_flags);
- HDassert(status_flags <= 255);
- sblock->status_flags = (uint8_t)status_flags;
- if(sblock->status_flags & ~H5F_SUPER_ALL_FLAGS)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad flag value for superblock")
-
- /*
- * If the superblock version # is greater than 0, read in the indexed
- * storage B-tree internal 'K' value
- */
- if(super_vers > HDF5_SUPERBLOCK_VERSION_DEF) {
- UINT16DECODE(p, btree_k[H5B_CHUNK_ID]);
- /* Reserved bytes are present only in version 1 */
- if(super_vers == HDF5_SUPERBLOCK_VERSION_1)
- p += 2; /* reserved */
- } /* end if */
- else
- btree_k[H5B_CHUNK_ID] = HDF5_BTREE_CHUNK_IK_DEF;
+ /* Make certain we can read the variable-sized portion of the superblock */
+ if(H5F__set_eoa(udata->f, H5FD_MEM_SUPER, (haddr_t)(H5F_SUPERBLOCK_FIXED_SIZE + variable_size)) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "set end of space allocation request failed")
+ } /* end if */
+ else {
+ /* Check for older version of superblock format */
+ if(super_vers < HDF5_SUPERBLOCK_VERSION_2) {
+ uint32_t status_flags; /* File status flags */
+ unsigned sym_leaf_k; /* Symbol table leaf node's 'K' value */
+ unsigned snode_btree_k; /* B-tree symbol table internal node 'K' value */
+ unsigned chunk_btree_k; /* B-tree chunk internal node 'K' value */
+
+ /* Freespace version (hard-wired) */
+ if(HDF5_FREESPACE_VERSION != *image++)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad free space version number")
+
+ /* Root group version number (hard-wired) */
+ if(HDF5_OBJECTDIR_VERSION != *image++)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad object directory version number")
+
+ /* Skip over reserved byte */
+ image++;
+
+ /* Shared header version number (hard-wired) */
+ if(HDF5_SHAREDHEADER_VERSION != *image++)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad shared-header format version number")
+
+ /* Size of file addresses */
+ sizeof_addr = *image++;
+ if(sizeof_addr != 2 && sizeof_addr != 4 &&
+ sizeof_addr != 8 && sizeof_addr != 16 && sizeof_addr != 32)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number in an address")
+ sblock->sizeof_addr = sizeof_addr;
+ udata->f->shared->sizeof_addr = sizeof_addr; /* Keep a local copy also */
+
+ /* Size of file sizes */
+ sizeof_size = *image++;
+ if(sizeof_size != 2 && sizeof_size != 4 &&
+ sizeof_size != 8 && sizeof_size != 16 && sizeof_size != 32)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number for object size")
+ sblock->sizeof_size = sizeof_size;
+ udata->f->shared->sizeof_size = sizeof_size; /* Keep a local copy also */
+
+ /* Skip over reserved byte */
+ image++;
+
+ /* Various B-tree sizes */
+ UINT16DECODE(image, sym_leaf_k);
+ if(sym_leaf_k == 0)
+ HGOTO_ERROR(H5E_FILE, H5E_BADRANGE, NULL, "bad symbol table leaf node 1/2 rank")
+ udata->sym_leaf_k = sym_leaf_k; /* Keep a local copy also */
+
+ /* B-tree internal node 'K' value for symbol table nodes */
+ UINT16DECODE(image, snode_btree_k);
+ if(snode_btree_k == 0)
+ HGOTO_ERROR(H5E_FILE, H5E_BADRANGE, NULL, "bad 1/2 rank for btree internal nodes")
+ udata->btree_k[H5B_SNODE_ID] = snode_btree_k;
- /* Set the B-tree internal node values, etc */
- if(H5P_set(c_plist, H5F_CRT_BTREE_RANK_NAME, btree_k) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, NULL, "unable to set rank for btree internal nodes")
- HDmemcpy(sblock->btree_k, btree_k, sizeof(unsigned) * (size_t)H5B_NUM_BTREE_ID); /* Keep a local copy also */
+ /*
+ * Delay setting the value in the property list until we've checked
+ * for the indexed storage B-tree internal 'K' value later.
+ */
- /* Remainder of "variable-sized" portion of superblock */
- H5F_addr_decode(f, (const uint8_t **)&p, &sblock->base_addr/*out*/);
- H5F_addr_decode(f, (const uint8_t **)&p, &sblock->ext_addr/*out*/);
- H5F_addr_decode(f, (const uint8_t **)&p, &stored_eof/*out*/);
- H5F_addr_decode(f, (const uint8_t **)&p, &sblock->driver_addr/*out*/);
+ /* File status flags (not really used yet) */
+ UINT32DECODE(image, status_flags);
+ HDassert(status_flags <= 255);
+ sblock->status_flags = (uint8_t)status_flags;
+ if(sblock->status_flags & ~H5F_SUPER_ALL_FLAGS)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad flag value for superblock")
- /* Allocate space for the root group symbol table entry */
- HDassert(!sblock->root_ent);
- if(NULL == (sblock->root_ent = (H5G_entry_t *)H5MM_calloc(sizeof(H5G_entry_t))))
- HGOTO_ERROR(H5E_FILE, H5E_CANTALLOC, NULL, "can't allocate space for root group symbol table entry")
+ /*
+ * If the superblock version # is greater than 0, read in the indexed
+ * storage B-tree internal 'K' value
+ */
+ if(super_vers > HDF5_SUPERBLOCK_VERSION_DEF) {
+ UINT16DECODE(image, chunk_btree_k);
- /* decode the root group symbol table entry */
- if(H5G_ent_decode(f, (const uint8_t **)&p, sblock->root_ent) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, NULL, "can't decode root group symbol table entry")
+ /* Reserved bytes are present only in version 1 */
+ if(super_vers == HDF5_SUPERBLOCK_VERSION_1)
+ image += 2; /* reserved */
+ } /* end if */
+ else
+ chunk_btree_k = HDF5_BTREE_CHUNK_IK_DEF;
+ udata->btree_k[H5B_CHUNK_ID] = chunk_btree_k;
+
+ /* Remainder of "variable-sized" portion of superblock */
+ H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->base_addr/*out*/);
+ H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->ext_addr/*out*/);
+ H5F_addr_decode(udata->f, (const uint8_t **)&image, &udata->stored_eof/*out*/);
+ H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->driver_addr/*out*/);
+
+ /* Allocate space for the root group symbol table entry */
+ HDassert(!sblock->root_ent);
+ if(NULL == (sblock->root_ent = (H5G_entry_t *)H5MM_calloc(sizeof(H5G_entry_t))))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTALLOC, NULL, "can't allocate space for root group symbol table entry")
+
+ /* decode the root group symbol table entry */
+ if(H5G_ent_decode(udata->f, (const uint8_t **)&image, sblock->root_ent) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, NULL, "can't decode root group symbol table entry")
+
+ /* Set the root group address to the correct value */
+ sblock->root_addr = sblock->root_ent->header;
+
+ /* This step is for h5repart tool only. If user wants to change file driver
+ * from family to sec2 while using h5repart, set the driver address to
+ * undefined to let the library ignore the family driver information saved
+ * in the superblock.
+ */
+ if(udata->ignore_drvrinfo && H5F_addr_defined(sblock->driver_addr)) {
+ /* Eliminate the driver info */
+ sblock->driver_addr = HADDR_UNDEF;
+ udata->drvrinfo_removed = TRUE;
+ } /* end if */
- /* Set the root group address to the correct value */
- sblock->root_addr = sblock->root_ent->header;
+ /* NOTE: Driver info block is decoded separately, later */
- /*
- * Check if superblock address is different from base address and
- * adjust base address and "end of address" address if so.
- */
- if(!H5F_addr_eq(base_addr, sblock->base_addr)) {
- /* Check if the superblock moved earlier in the file */
- if(H5F_addr_lt(base_addr, sblock->base_addr))
- stored_eof -= (sblock->base_addr - base_addr);
- else
- /* The superblock moved later in the file */
- stored_eof += (base_addr - sblock->base_addr);
+ } /* end if */
+ else {
+ uint32_t computed_chksum; /* Computed checksum */
+ uint32_t read_chksum; /* Checksum read from file */
+
+ /* Size of file addresses */
+ sizeof_addr = *image++;
+ if(sizeof_addr != 2 && sizeof_addr != 4 &&
+ sizeof_addr != 8 && sizeof_addr != 16 && sizeof_addr != 32)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number in an address")
+ sblock->sizeof_addr = sizeof_addr;
+ udata->f->shared->sizeof_addr = sizeof_addr; /* Keep a local copy also */
+
+ /* Size of file sizes */
+ sizeof_size = *image++;
+ if(sizeof_size != 2 && sizeof_size != 4 &&
+ sizeof_size != 8 && sizeof_size != 16 && sizeof_size != 32)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number for object size")
+ sblock->sizeof_size = sizeof_size;
+ udata->f->shared->sizeof_size = sizeof_size; /* Keep a local copy also */
+
+ /* File status flags (not really used yet) */
+ sblock->status_flags = *image++;
+ if(sblock->status_flags & ~H5F_SUPER_ALL_FLAGS)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad flag value for superblock")
+
+ /* Base, superblock extension, end of file & root group object header addresses */
+ H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->base_addr/*out*/);
+ H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->ext_addr/*out*/);
+ H5F_addr_decode(udata->f, (const uint8_t **)&image, &udata->stored_eof/*out*/);
+ H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->root_addr/*out*/);
+
+ /* Compute checksum for superblock */
+ computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
+
+ /* Decode checksum */
+ UINT32DECODE(image, read_chksum);
+
+ /* Verify correct checksum */
+ if(read_chksum != computed_chksum)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "bad checksum on driver information block")
+
+ /* The driver information block may not appear with the version
+ * 2 superblock. Thus we set the driver_addr field of the
+ * in-core representation of the superblock to HADDR_UNDEF to
+ * prevent any attempt to load the driver information block.
+ */
+ sblock->driver_addr = HADDR_UNDEF;
+ } /* end else */
+ } /* end else */
- /* Adjust base address for offsets of the HDF5 data in the file */
- sblock->base_addr = base_addr;
+ /* Sanity check */
+ HDassert((size_t)(image - (const uint8_t *)_image) <= len);
- /* Set the base address for the file in the VFD now */
- if(H5FD_set_base_addr(lf, sblock->base_addr) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTSET, NULL, "failed to set base address for file driver")
+ /* Set return value */
+ ret_value = sblock;
- /* Indicate that the superblock should be marked dirty */
- *dirtied = TRUE;
- } /* end if */
+done:
+ /* Release the [possibly partially initialized] superblock on error */
+ if(!ret_value && sblock)
+ if(H5F__super_free(sblock) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_CANTFREE, NULL, "unable to destroy superblock data")
- /* This step is for h5repart tool only. If user wants to change file driver
- * from family to sec2 while using h5repart, set the driver address to
- * undefined to let the library ignore the family driver information saved
- * in the superblock.
- */
- if(H5F_HAS_FEATURE(f, H5FD_FEAT_IGNORE_DRVRINFO)) {
- /* Eliminate the driver info */
- sblock->driver_addr = HADDR_UNDEF;
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5F__cache_superblock_deserialize() */
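The version >= 2 branch above checksums everything it decoded and compares the result with the trailing stored checksum before trusting the image. The standalone sketch below shows that encode-checksum / decode-verify pattern with a simple multiplicative checksum; the real library uses its own metadata checksum routine, so treat the algorithm here purely as a placeholder.

#include <stdio.h>
#include <stdint.h>

/* Placeholder checksum -- the pattern matters here, not the algorithm. */
static uint32_t toy_checksum(const unsigned char *buf, size_t len)
{
    uint32_t sum = 0;
    size_t i;
    for(i = 0; i < len; i++)
        sum = sum * 31u + buf[i];
    return sum;
}

/* Append a 4-byte little-endian checksum of body[0..len) at body[len]. */
static void encode_with_checksum(unsigned char *body, size_t len)
{
    uint32_t sum = toy_checksum(body, len);
    body[len + 0] = (unsigned char)(sum & 0xff);
    body[len + 1] = (unsigned char)((sum >> 8) & 0xff);
    body[len + 2] = (unsigned char)((sum >> 16) & 0xff);
    body[len + 3] = (unsigned char)((sum >> 24) & 0xff);
}

/* Verify on decode: recompute over the decoded span, compare to stored value. */
static int decode_verify_checksum(const unsigned char *body, size_t len)
{
    uint32_t computed = toy_checksum(body, len);
    uint32_t stored = (uint32_t)body[len] | ((uint32_t)body[len + 1] << 8) |
                      ((uint32_t)body[len + 2] << 16) | ((uint32_t)body[len + 3] << 24);
    return computed == stored ? 0 : -1;
}

int main(void)
{
    unsigned char image[16] = "superblock!";
    size_t body_len = 11;

    encode_with_checksum(image, body_len);
    printf("intact image: %s\n", decode_verify_checksum(image, body_len) == 0 ? "ok" : "bad");
    image[3] ^= 0x01;   /* corrupt one byte */
    printf("corrupted image: %s\n", decode_verify_checksum(image, body_len) == 0 ? "ok" : "bad");
    return 0;
}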
- /* Indicate that the superblock should be marked dirty */
- *dirtied = TRUE;
- } /* end if */
+
+/*-------------------------------------------------------------------------
+ * Function: H5F__cache_superblock_image_len
+ *
+ * Purpose: Compute the size of the data structure on disk.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * July 19, 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5F__cache_superblock_image_len(const void *_thing, size_t *image_len, hbool_t *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
+{
+ const H5F_super_t *sblock = (const H5F_super_t *)_thing; /* Pointer to the object */
- /* Decode the optional driver information block */
- if(H5F_addr_defined(sblock->driver_addr)) {
- uint8_t dbuf[H5F_MAX_DRVINFOBLOCK_SIZE]; /* Buffer for driver info block */
- char drv_name[9]; /* Name of driver */
- unsigned drv_vers; /* Version of driver info block */
- size_t drv_variable_size; /* Size of variable-length portion of driver info block, in bytes */
-
- /* Read in fixed-sized portion of driver info block */
- p = dbuf;
- if(H5FD_set_eoa(lf, H5FD_MEM_SUPER, sblock->driver_addr + H5F_DRVINFOBLOCK_HDR_SIZE) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "set end of space allocation request failed")
- if(H5FD_read(lf, dxpl, H5FD_MEM_SUPER, sblock->driver_addr, (size_t)H5F_DRVINFOBLOCK_HDR_SIZE, p) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to read driver information block")
-
- /* Version number */
- drv_vers = *p++;
- if(drv_vers != HDF5_DRIVERINFO_VERSION_0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "bad driver information block version number")
-
- p += 3; /* reserved bytes */
-
- /* Driver info size */
- UINT32DECODE(p, drv_variable_size);
-
- /* Sanity check */
- HDassert(H5F_DRVINFOBLOCK_HDR_SIZE + drv_variable_size <= sizeof(dbuf));
-
- /* Driver name and/or version */
- HDstrncpy(drv_name, (const char *)p, (size_t)8);
- drv_name[8] = '\0';
- p += 8; /* advance past name/version */
-
- /* Read in variable-sized portion of driver info block */
- if(H5FD_set_eoa(lf, H5FD_MEM_SUPER, sblock->driver_addr + H5F_DRVINFOBLOCK_HDR_SIZE + drv_variable_size) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "set end of space allocation request failed")
- if(H5FD_read(lf, dxpl, H5FD_MEM_SUPER, sblock->driver_addr + H5F_DRVINFOBLOCK_HDR_SIZE, drv_variable_size, p) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to read file driver information")
-
- /* Decode driver information */
- if(H5FD_sb_load(lf, drv_name, p) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to decode driver information")
- } /* end if */
- } /* end if */
- else {
- uint32_t computed_chksum; /* Computed checksum */
- uint32_t read_chksum; /* Checksum read from file */
-
- /* Size of file addresses */
- sizeof_addr = *p++;
- if(sizeof_addr != 2 && sizeof_addr != 4 &&
- sizeof_addr != 8 && sizeof_addr != 16 && sizeof_addr != 32)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number in an address")
- if(H5P_set(c_plist, H5F_CRT_ADDR_BYTE_NUM_NAME, &sizeof_addr) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, NULL, "unable to set byte number in an address")
- shared->sizeof_addr = sizeof_addr; /* Keep a local copy also */
-
- /* Size of file sizes */
- sizeof_size = *p++;
- if(sizeof_size != 2 && sizeof_size != 4 &&
- sizeof_size != 8 && sizeof_size != 16 && sizeof_size != 32)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number for object size")
- if(H5P_set(c_plist, H5F_CRT_OBJ_BYTE_NUM_NAME, &sizeof_size) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, NULL, "unable to set byte number for object size")
- shared->sizeof_size = sizeof_size; /* Keep a local copy also */
-
- /* File status flags (not really used yet) */
- sblock->status_flags = *p++;
- if(sblock->status_flags & ~H5F_SUPER_ALL_FLAGS)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad flag value for superblock")
-
- /* Base, superblock extension, end of file & root group object header addresses */
- H5F_addr_decode(f, (const uint8_t **)&p, &sblock->base_addr/*out*/);
- H5F_addr_decode(f, (const uint8_t **)&p, &sblock->ext_addr/*out*/);
- H5F_addr_decode(f, (const uint8_t **)&p, &stored_eof/*out*/);
- H5F_addr_decode(f, (const uint8_t **)&p, &sblock->root_addr/*out*/);
-
- /* Compute checksum for superblock */
- computed_chksum = H5_checksum_metadata(sbuf, (size_t)(p - sbuf), 0);
-
- /* Decode checksum */
- UINT32DECODE(p, read_chksum);
-
- /* Verify correct checksum */
- if(read_chksum != computed_chksum)
- HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "bad checksum on driver information block")
+ FUNC_ENTER_STATIC_NOERR
- /*
- * Check if superblock address is different from base address and
- * adjust base address and "end of address" address if so.
- */
- if(!H5F_addr_eq(base_addr, sblock->base_addr)) {
- /* Check if the superblock moved earlier in the file */
- if(H5F_addr_lt(base_addr, sblock->base_addr))
- stored_eof -= (sblock->base_addr - base_addr);
- else
- /* The superblock moved later in the file */
- stored_eof += (base_addr - sblock->base_addr);
+ /* Check arguments */
+ HDassert(sblock);
+ HDassert(sblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(sblock->cache_info.type == H5AC_SUPERBLOCK);
+ HDassert(image_len);
+ HDassert(compressed_ptr);
- /* Adjust base address for offsets of the HDF5 data in the file */
- sblock->base_addr = base_addr;
+ /* Set the image length */
+ *image_len = (size_t)H5F_SUPERBLOCK_SIZE(sblock);
- /* Set the base address for the file in the VFD now */
- if(H5FD_set_base_addr(lf, sblock->base_addr) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTSET, NULL, "failed to set base address for file driver")
+ /* Set *compressed_ptr to FALSE unconditionally */
+ *compressed_ptr = FALSE;
- /* Indicate that the superblock should be marked dirty */
- *dirtied = TRUE;
- } /* end if */
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5F__cache_superblock_image_len() */
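The image_len callback above derives the on-disk size purely from fields cached in the in-core superblock (version, sizeof_addr, sizeof_size) instead of consulting the file handle. A tiny sketch of that style of size computation is below; the hdr_t fields, the fixed-size constant, and the field counts are invented for illustration.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical block header that records its own encoding widths. */
typedef struct { uint8_t sizeof_addr; uint8_t sizeof_size; unsigned naddrs; } hdr_t;

#define HDR_FIXED_SIZE 13u   /* signature/version/flags; illustrative value only */

/* On-disk image length computed purely from in-core fields, no file handle needed. */
static size_t hdr_image_len(const hdr_t *h)
{
    return HDR_FIXED_SIZE
         + h->naddrs * (size_t)h->sizeof_addr   /* encoded file addresses */
         + (size_t)h->sizeof_size               /* one encoded length */
         + 4u;                                  /* trailing checksum */
}

int main(void)
{
    hdr_t h = { 8, 8, 4 };
    printf("image length = %zu bytes\n", hdr_image_len(&h));
    return 0;
}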
- /* Get the B-tree internal node values, etc */
- if(H5P_get(c_plist, H5F_CRT_BTREE_RANK_NAME, sblock->btree_k) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "unable to get rank for btree internal nodes")
- if(H5P_get(c_plist, H5F_CRT_SYM_LEAF_NAME, &sblock->sym_leaf_k) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "unable to get rank for btree internal nodes")
- } /* end else */
+
+/*-------------------------------------------------------------------------
+ * Function: H5F__cache_superblock_pre_serialize
+ *
+ * Purpose: The current use of this function is a kludge to repair an
+ * oversight in the conversion of the superblock code to use the
+ * version 3 cache.
+ *
+ * In the V2 metadata cache callbacks, the superblock driver info
+ * message was updated in the flush routine. Note that this
+ * operation only applies to version 2 or later superblocks.
+ *
+ * Somehow, this functionality was lost in the conversion to use
+ * the V3 cache, causing failures with the multi file driver
+ * (and possibly the family file driver as well).
+ *
+ * Performing this operation is impossible in the current
+ * serialize routine, as the dxpl_id is not available. While
+ * I am pretty sure that this is not the correct place for this
+ * functionality, since I can see it causing problems with both
+ * journaling and possibly parallel HDF5, I am placing the
+ * necessary update in the pre_serialize call for now, for
+ * testing purposes. We will almost certainly want to change
+ * this.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 10/82/14
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5F__cache_superblock_pre_serialize(const H5F_t *f, hid_t dxpl_id,
+ void *_thing, haddr_t H5_ATTR_UNUSED addr, size_t H5_ATTR_UNUSED len,
+ size_t H5_ATTR_UNUSED compressed_len, haddr_t H5_ATTR_UNUSED *new_addr,
+ size_t H5_ATTR_UNUSED *new_len, size_t H5_ATTR_UNUSED *new_compressed_len,
+ unsigned H5_ATTR_UNUSED *flags)
+{
+ H5F_super_t *sblock = (H5F_super_t *)_thing; /* Pointer to the super block */
+ herr_t ret_value = SUCCEED; /* Return value */
- /*
- * The user-defined data is the area of the file before the base
- * address.
- */
- if(H5P_set(c_plist, H5F_CRT_USER_BLOCK_NAME, &sblock->base_addr) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, NULL, "unable to set userblock size")
+ FUNC_ENTER_NOAPI_NOINIT
- /*
- * Make sure that the data is not truncated. One case where this is
- * possible is if the first file of a family of files was opened
- * individually.
- */
- if(HADDR_UNDEF == (eof = H5FD_get_eof(lf, H5FD_MEM_DEFAULT)))
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "unable to determine file size")
-
- /* (Account for the stored EOF being absolute offset -QAK) */
- if((eof + sblock->base_addr) < stored_eof)
- HGOTO_ERROR(H5E_FILE, H5E_TRUNCATED, NULL,
- "truncated file: eof = %llu, sblock->base_addr = %llu, stored_eof = %llu",
- (unsigned long long)eof, (unsigned long long)sblock->base_addr, (unsigned long long)stored_eof)
-
- /*
- * Tell the file driver how much address space has already been
- * allocated so that it knows how to allocate additional memory.
- */
- /* (Account for the stored EOA being absolute offset -NAF) */
- if(H5FD_set_eoa(lf, H5FD_MEM_SUPER, stored_eof - sblock->base_addr) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to set end-of-address marker for file")
-
- /* Read the file's superblock extension, if there is one. */
- if(H5F_addr_defined(sblock->ext_addr)) {
- H5O_loc_t ext_loc; /* "Object location" for superblock extension */
- H5O_btreek_t btreek; /* v1 B-tree 'K' value message from superblock extension */
- H5O_drvinfo_t drvinfo; /* Driver info message from superblock extension */
- size_t u; /* Local index variable */
- htri_t status; /* Status for message existing */
-
- /* Sanity check - superblock extension should only be defined for
- * superblock version >= 2.
+ /* Sanity check */
+ HDassert(f);
+ HDassert(sblock);
+ HDassert(sblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(sblock->cache_info.type == H5AC_SUPERBLOCK);
+ HDassert(flags);
+
+ if(sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_2) {
+ /* WARNING: This code almost certainly doesn't belong here. Must
+ * discuss with Quincey where to put it. Note issues
+ * for journaling and possibly parallel.
+ *
+ * -- JRM
*/
- HDassert(super_vers >= HDF5_SUPERBLOCK_VERSION_2);
-
- /* Check for superblock extension being located "outside" the stored
- * 'eoa' value, which can occur with the split/multi VFD.
+ /* Update the driver information message in the superblock extension
+ * if appropriate.
*/
- if(H5F_addr_gt(sblock->ext_addr, stored_eof)) {
- /* Set the 'eoa' for the object header memory type large enough
- * to give some room for a reasonably sized superblock extension.
- * (This is _rather_ a kludge -QAK)
- */
- if(H5FD_set_eoa(lf, H5FD_MEM_OHDR, (haddr_t)(sblock->ext_addr + 1024)) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to set end-of-address marker for file")
- } /* end if */
+ if(H5F_addr_defined(sblock->ext_addr)) {
+ size_t driver_size; /* Size of driver info block (bytes)*/
+ H5O_loc_t ext_loc; /* "Object location" for superblock extension */
- /* Open the superblock extension */
- if(H5F_super_ext_open(f, sblock->ext_addr, &ext_loc) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTOPENOBJ, NULL, "unable to open file's superblock extension")
+ HDassert(sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_2);
+
+ /* Open the superblock extension's object header */
+ if(H5F_super_ext_open((H5F_t *)f, sblock->ext_addr, &ext_loc) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTOPENOBJ, FAIL, "unable to open file's superblock extension")
- /* Check for the extension having a 'driver info' message */
- if((status = H5O_msg_exists(&ext_loc, H5O_DRVINFO_ID, dxpl_id)) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "unable to read object header")
- if(status) {
/* Check for ignoring the driver info for this file */
- if(H5F_HAS_FEATURE(f, H5FD_FEAT_IGNORE_DRVRINFO)) {
- /* Indicate that the superblock should be marked dirty */
- *dirtied = TRUE;
- } /* end if */
- else {
- /* Retrieve the 'driver info' structure */
- if(NULL == H5O_msg_read(&ext_loc, H5O_DRVINFO_ID, &drvinfo, dxpl_id))
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "driver info message not present")
-
- /* Decode driver information */
- if(H5FD_sb_load(lf, drvinfo.name, drvinfo.buf) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to decode driver information")
-
- /* Reset driver info message */
- H5O_msg_reset(H5O_DRVINFO_ID, &drvinfo);
- } /* end else */
- } /* end if */
+ if(!H5F_HAS_FEATURE(f, H5FD_FEAT_IGNORE_DRVRINFO)) {
+ /* Check for driver info message */
+ H5_CHECKED_ASSIGN(driver_size, size_t, H5FD_sb_size(f->shared->lf), hsize_t);
+ if(driver_size > 0) {
+ H5O_drvinfo_t drvinfo; /* Driver info */
+ uint8_t dbuf[H5F_MAX_DRVINFOBLOCK_SIZE]; /* Driver info block encoding buffer */
- /* Read in the shared OH message information if there is any */
- if(H5SM_get_info(&ext_loc, c_plist, dxpl_id) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to read SOHM table information")
-
- /* Check for the extension having a 'v1 B-tree "K"' message */
- if((status = H5O_msg_exists(&ext_loc, H5O_BTREEK_ID, dxpl_id)) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "unable to read object header")
- if(status) {
- /* Retrieve the 'v1 B-tree "K"' structure */
- if(NULL == H5O_msg_read(&ext_loc, H5O_BTREEK_ID, &btreek, dxpl_id))
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "v1 B-tree 'K' info message not present")
-
- /* Set non-default v1 B-tree 'K' value info from file */
- sblock->btree_k[H5B_CHUNK_ID] = btreek.btree_k[H5B_CHUNK_ID];
- sblock->btree_k[H5B_SNODE_ID] = btreek.btree_k[H5B_SNODE_ID];
- sblock->sym_leaf_k = btreek.sym_leaf_k;
-
- /* Set non-default v1 B-tree 'K' values in the property list */
- if(H5P_set(c_plist, H5F_CRT_BTREE_RANK_NAME, btreek.btree_k) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, NULL, "unable to set rank for btree internal nodes")
- if(H5P_set(c_plist, H5F_CRT_SYM_LEAF_NAME, &btreek.sym_leaf_k) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, NULL, "unable to set rank for symbol table leaf nodes")
- } /* end if */
+ /* Sanity check */
+ HDassert(driver_size <= H5F_MAX_DRVINFOBLOCK_SIZE);
- /* Check for the extension having a 'free-space manager info' message */
- if((status = H5O_msg_exists(&ext_loc, H5O_FSINFO_ID, dxpl_id)) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "unable to check object header")
- if(status) {
- H5O_fsinfo_t fsinfo; /* Free-space manager info message from superblock extension */
-
- /* Retrieve the 'free-space manager info' structure */
- if(NULL == H5O_msg_read(&ext_loc, H5O_FSINFO_ID, &fsinfo, dxpl_id))
- HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, NULL, "unable to get free-space manager info message")
-
- if(shared->fs_strategy != fsinfo.strategy) {
- shared->fs_strategy = fsinfo.strategy;
-
- /* Set non-default strategy in the property list */
- if(H5P_set(c_plist, H5F_CRT_FILE_SPACE_STRATEGY_NAME, &fsinfo.strategy) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTSET, NULL, "unable to set file space strategy")
- } /* end if */
- if(shared->fs_threshold != fsinfo.threshold) {
- shared->fs_threshold = fsinfo.threshold;
-
- /* Set non-default threshold in the property list */
- if(H5P_set(c_plist, H5F_CRT_FREE_SPACE_THRESHOLD_NAME, &fsinfo.threshold) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTSET, NULL, "unable to set file space strategy")
- } /* end if */
-
- /* set free-space manager addresses */
- shared->fs_addr[0] = HADDR_UNDEF;
- for(u = 1; u < NELMTS(f->shared->fs_addr); u++)
- shared->fs_addr[u] = fsinfo.fs_addr[u-1];
- } /* end if */
+ /* Encode driver-specific data */
+ if(H5FD_sb_encode(f->shared->lf, drvinfo.name, dbuf) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to encode driver information")
- /* Close superblock extension */
- if(H5F_super_ext_close(f, &ext_loc, dxpl_id, FALSE) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTRELEASE, NULL, "unable to close file's superblock extension")
- } /* end if */
+ /* Write driver info information to the superblock extension */
+ drvinfo.len = driver_size;
+ drvinfo.buf = dbuf;
+ if(H5O_msg_write(&ext_loc, H5O_DRVINFO_ID, H5O_MSG_FLAG_DONTSHARE, H5O_UPDATE_TIME, &drvinfo, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "unable to update driver info header message")
+ } /* end if */
+ } /* end if */
- /* Set return value */
- ret_value = sblock;
+ /* Close the superblock extension object header */
+ if(H5F_super_ext_close((H5F_t *)f, &ext_loc, dxpl_id, FALSE) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTCLOSEOBJ, FAIL, "unable to close file's superblock extension")
+ } /* end if */
+ } /* end if */
done:
- /* Release the [possibly partially initialized] superblock on errors */
- if(!ret_value && sblock)
- if(H5F__super_free(sblock) < 0)
- HDONE_ERROR(H5E_FILE, H5E_CANTFREE, NULL, "unable to destroy superblock data")
-
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5F_sblock_load() */
+} /* end H5F__cache_superblock_pre_serialize() */
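The pre_serialize hook above exists so that work needing more context than serialize has (here, a dxpl and object-header I/O) can run just before the image is produced. The sketch below shows that split with invented types: pre_serialize refreshes a dependent field from the surrounding file state, and serialize then only encodes what is already in the in-core struct.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical in-core entry plus the "file" state it depends on. */
typedef struct { uint32_t eoa; } file_state_t;
typedef struct { uint32_t recorded_eoa; } super_t;

/* Runs before flushing: pull in anything that may have changed elsewhere. */
static int super_pre_serialize(const file_state_t *fs, super_t *s)
{
    s->recorded_eoa = fs->eoa;     /* refresh dependent field */
    return 0;
}

/* Pure encode of the in-core struct into the on-disk image (little-endian). */
static int super_serialize(unsigned char *image, size_t len, const super_t *s)
{
    if(len < 4)
        return -1;
    image[0] = (unsigned char)(s->recorded_eoa & 0xff);
    image[1] = (unsigned char)((s->recorded_eoa >> 8) & 0xff);
    image[2] = (unsigned char)((s->recorded_eoa >> 16) & 0xff);
    image[3] = (unsigned char)((s->recorded_eoa >> 24) & 0xff);
    return 0;
}

int main(void)
{
    file_state_t fs = { 0x1000 };
    super_t s = { 0 };
    unsigned char image[4];

    fs.eoa = 0x2000;                       /* file grew since the entry was created */
    super_pre_serialize(&fs, &s);          /* flush-time refresh */
    super_serialize(image, sizeof(image), &s);
    printf("encoded eoa = 0x%02x%02x%02x%02x\n", (unsigned)image[3],
           (unsigned)image[2], (unsigned)image[1], (unsigned)image[0]);
    return 0;
}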
/*-------------------------------------------------------------------------
- * Function: H5F_sblock_flush
+ * Function: H5F__cache_superblock_serialize
*
- * Purpose: Flushes the superblock.
+ * Purpose: Flushes a dirty object to disk.
*
- * Return: Success: SUCCEED
- * Failure: NULL
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Mike McGreevy
- * mamcgree@hdfgroup.org
- * April 8, 2009
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * July 19 2013
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5F_sblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t H5_ATTR_UNUSED addr,
- H5F_super_t *sblock)
+H5F__cache_superblock_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNUSED len,
+ void *_thing)
{
- herr_t ret_value = SUCCEED;
+ H5F_super_t *sblock = (H5F_super_t *)_thing; /* Pointer to the object */
+ uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */
+ haddr_t rel_eof; /* Relative EOF for file */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
- /* check arguments */
+ /* Sanity check */
HDassert(f);
- HDassert(H5F_addr_eq(addr, 0));
+ HDassert(image);
HDassert(sblock);
-
+
/* Assert that the superblock is marked as being flushed last (and
collectively in parallel) */
/* (We'll rely on the cache to make sure it actually *is* flushed
@@ -656,301 +619,403 @@ H5F_sblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t H5_ATTR_UNUSE
HDassert(sblock->cache_info.flush_me_collectively);
#endif
- if(sblock->cache_info.is_dirty) {
- H5P_genplist_t *dxpl; /* DXPL object */
- uint8_t buf[H5F_MAX_SUPERBLOCK_SIZE + H5F_MAX_DRVINFOBLOCK_SIZE]; /* Superblock & driver info blockencoding buffer */
- uint8_t *p; /* Ptr into encoding buffer */
- haddr_t rel_eof; /* Relative EOF for file */
- size_t superblock_size; /* Size of superblock, in bytes */
- size_t driver_size; /* Size of driver info block (bytes)*/
+ /* Encode the common portion of the file superblock for all versions */
+ HDmemcpy(image, H5F_SIGNATURE, (size_t)H5F_SIGNATURE_LEN);
+ image += H5F_SIGNATURE_LEN;
+ *image++ = (uint8_t)sblock->super_vers;
- /* Encode the common portion of the file superblock for all versions */
- p = buf;
- HDmemcpy(p, H5F_SIGNATURE, (size_t)H5F_SIGNATURE_LEN);
- p += H5F_SIGNATURE_LEN;
- *p++ = (uint8_t)sblock->super_vers;
+ /* Check for older version of superblock format */
+ if(sblock->super_vers < HDF5_SUPERBLOCK_VERSION_2) {
+ *image++ = (uint8_t)HDF5_FREESPACE_VERSION; /* (hard-wired) */
+ *image++ = (uint8_t)HDF5_OBJECTDIR_VERSION; /* (hard-wired) */
+ *image++ = 0; /* reserved*/
- /* Check for older version of superblock format */
- if(sblock->super_vers < HDF5_SUPERBLOCK_VERSION_2) {
- *p++ = (uint8_t)HDF5_FREESPACE_VERSION; /* (hard-wired) */
- *p++ = (uint8_t)HDF5_OBJECTDIR_VERSION; /* (hard-wired) */
- *p++ = 0; /* reserved*/
+ *image++ = (uint8_t)HDF5_SHAREDHEADER_VERSION; /* (hard-wired) */
+ *image++ = sblock->sizeof_addr;
+ *image++ = sblock->sizeof_size;
+ *image++ = 0; /* reserved */
- *p++ = (uint8_t)HDF5_SHAREDHEADER_VERSION; /* (hard-wired) */
- *p++ = (uint8_t)H5F_SIZEOF_ADDR(f);
- *p++ = (uint8_t)H5F_SIZEOF_SIZE(f);
- *p++ = 0; /* reserved */
+ UINT16ENCODE(image, sblock->sym_leaf_k);
+ UINT16ENCODE(image, sblock->btree_k[H5B_SNODE_ID]);
+ UINT32ENCODE(image, (uint32_t)sblock->status_flags);
- UINT16ENCODE(p, sblock->sym_leaf_k);
- UINT16ENCODE(p, sblock->btree_k[H5B_SNODE_ID]);
- UINT32ENCODE(p, (uint32_t)sblock->status_flags);
+ /*
+ * Versions of the superblock >0 have the indexed storage B-tree
+ * internal 'K' value stored
+ */
+ if(sblock->super_vers > HDF5_SUPERBLOCK_VERSION_DEF) {
+ UINT16ENCODE(image, sblock->btree_k[H5B_CHUNK_ID]);
+ *image++ = 0; /*reserved */
+ *image++ = 0; /*reserved */
+ } /* end if */
- /*
- * Versions of the superblock >0 have the indexed storage B-tree
- * internal 'K' value stored
- */
- if(sblock->super_vers > HDF5_SUPERBLOCK_VERSION_DEF) {
- UINT16ENCODE(p, sblock->btree_k[H5B_CHUNK_ID]);
- *p++ = 0; /*reserved */
- *p++ = 0; /*reserved */
- } /* end if */
+ /* Encode the base address */
+ H5F_addr_encode(f, &image, sblock->base_addr);
- /* Encode the base address */
- H5F_addr_encode(f, &p, sblock->base_addr);
-
- /* Encode the address of global free-space index */
- H5F_addr_encode(f, &p, sblock->ext_addr);
-
- /* Encode the end-of-file address. Note that at this point in time,
- * the EOF value itself may not be reflective of the file's size, as
- * we will eventually truncate the file to match the EOA value. As
- * such, use the EOA value in its place, knowing that the current EOF
- * value will ultimately match it. */
- if ((rel_eof = H5FD_get_eoa(f->shared->lf, H5FD_MEM_SUPER)) == HADDR_UNDEF)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "driver get_eoa request failed")
- H5F_addr_encode(f, &p, (rel_eof + sblock->base_addr));
-
- /* Encode the driver informaton block address */
- H5F_addr_encode(f, &p, sblock->driver_addr);
-
- /* Encode the root group object entry, including the cached stab info */
- if(H5G_ent_encode(f, &p, sblock->root_ent) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTENCODE, FAIL, "can't encode root group symbol table entry")
-
- /* Encode the driver information block. */
- H5_CHECKED_ASSIGN(driver_size, size_t, H5FD_sb_size(f->shared->lf), hsize_t);
-
- /* Checking whether driver block address is defined here is to handle backward
- * compatibility. If the file was created with v1.6 library or earlier and no
- * driver info block was written in the superblock, we don't write it either even
- * though there's some driver info. Otherwise, the driver block extended will
- * overwrite the (meta)data right after the superblock. This situation happens to
- * the family driver particularly. SLU - 2009/3/24
- */
- if(driver_size > 0 && H5F_addr_defined(sblock->driver_addr)) {
- char driver_name[9]; /* Name of driver, for driver info block */
- uint8_t *dbuf = p; /* Pointer to beginning of driver info */
+ /* Encode the address of global free-space index */
+ H5F_addr_encode(f, &image, sblock->ext_addr);
- /* Encode the driver information block */
- *p++ = HDF5_DRIVERINFO_VERSION_0; /* Version */
- *p++ = 0; /* reserved */
- *p++ = 0; /* reserved */
- *p++ = 0; /* reserved */
+ /* Encode the end-of-file address. Note that at this point in time,
+ * the EOF value itself may not be reflective of the file's size, as
+ * we will eventually truncate the file to match the EOA value. As
+ * such, use the EOA value in its place, knowing that the current EOF
+ * value will ultimately match it. */
+ if ((rel_eof = H5FD_get_eoa(f->shared->lf, H5FD_MEM_SUPER)) == HADDR_UNDEF)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "driver get_eoa request failed")
+ H5F_addr_encode(f, &image, (rel_eof + sblock->base_addr));
- /* Driver info size, excluding header */
- UINT32ENCODE(p, driver_size);
+ /* Encode the driver information block address */
+ H5F_addr_encode(f, &image, sblock->driver_addr);
- /* Encode driver-specific data */
- if(H5FD_sb_encode(f->shared->lf, driver_name, dbuf + H5F_DRVINFOBLOCK_HDR_SIZE) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to encode driver information")
+ /* Encode the root group object entry, including the cached stab info */
+ if(H5G_ent_encode(f, &image, sblock->root_ent) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTENCODE, FAIL, "can't encode root group symbol table entry")
- /* Store driver name (set in 'H5FD_sb_encode' call above) */
- HDmemcpy(p, driver_name, (size_t)8);
+ /* NOTE: Driver info block is handled separately */
- /* Advance buffer pointer past name & variable-sized portion of driver info */
- /* (for later use in computing the superblock size) */
- p += 8 + driver_size;
- } /* end if */
- } /* end if */
- else {
- uint32_t chksum; /* Checksum temporary variable */
- H5O_loc_t *root_oloc; /* Pointer to root group's object location */
+ } /* end if */
+ else { /* sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_2 */
+ uint32_t chksum; /* Checksum temporary variable */
+ H5O_loc_t *root_oloc; /* Pointer to root group's object location */
- /* Size of file addresses & offsets, and status flags */
- *p++ = (uint8_t)H5F_SIZEOF_ADDR(f);
- *p++ = (uint8_t)H5F_SIZEOF_SIZE(f);
- *p++ = sblock->status_flags;
+ /* Size of file addresses & offsets, and status flags */
+ *image++ = sblock->sizeof_addr;
+ *image++ = sblock->sizeof_size;
+ *image++ = sblock->status_flags;
- /* Encode the base address */
- H5F_addr_encode(f, &p, sblock->base_addr);
+ /* Encode the base address */
+ H5F_addr_encode(f, &image, sblock->base_addr);
- /* Encode the address of the superblock extension */
- H5F_addr_encode(f, &p, sblock->ext_addr);
+ /* Encode the address of the superblock extension */
+ H5F_addr_encode(f, &image, sblock->ext_addr);
- /* At this point in time, the EOF value itself may
- * not be reflective of the file's size, since we'll eventually
- * truncate it to match the EOA value. As such, use the EOA value
- * in its place, knowing that the current EOF value will
- * ultimately match it. */
- if ((rel_eof = H5FD_get_eoa(f->shared->lf, H5FD_MEM_SUPER)) == HADDR_UNDEF)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "driver get_eoa request failed")
- H5F_addr_encode(f, &p, (rel_eof + sblock->base_addr));
+ /* At this point in time, the EOF value itself may
+ * not be reflective of the file's size, since we'll eventually
+ * truncate it to match the EOA value. As such, use the EOA value
+ * in its place, knowing that the current EOF value will
+ * ultimately match it. */
+ if ((rel_eof = H5FD_get_eoa(f->shared->lf, H5FD_MEM_SUPER)) == HADDR_UNDEF)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "driver get_eoa request failed")
+ H5F_addr_encode(f, &image, (rel_eof + sblock->base_addr));
- /* Retrieve information for root group */
- if(NULL == (root_oloc = H5G_oloc(f->shared->root_grp)))
- HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to retrieve root group information")
+ /* Retrieve information for root group */
+ if(NULL == (root_oloc = H5G_oloc(f->shared->root_grp)))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to retrieve root group information")
- /* Encode address of root group's object header */
- H5F_addr_encode(f, &p, root_oloc->addr);
+ /* Encode address of root group's object header */
+ H5F_addr_encode(f, &image, root_oloc->addr);
- /* Compute superblock checksum */
- chksum = H5_checksum_metadata(buf, ((size_t)H5F_SUPERBLOCK_SIZE(sblock->super_vers, f) - H5F_SIZEOF_CHKSUM), 0);
+ /* Compute superblock checksum */
+ chksum = H5_checksum_metadata(_image, ((size_t)H5F_SUPERBLOCK_SIZE(sblock) - H5F_SIZEOF_CHKSUM), 0);
- /* Superblock checksum */
- UINT32ENCODE(p, chksum);
+ /* Superblock checksum */
+ UINT32ENCODE(image, chksum);
- /* Sanity check */
- HDassert((size_t)(p - buf) == (size_t)H5F_SUPERBLOCK_SIZE(sblock->super_vers, f));
- } /* end else */
+ /* Sanity check */
+ HDassert((size_t)(image - (uint8_t *)_image) == (size_t)H5F_SUPERBLOCK_SIZE(sblock));
+ } /* end else */
- /* Retrieve the total size of the superblock info */
- H5_CHECKED_ASSIGN(superblock_size, size_t, (p - buf), ptrdiff_t);
+ /* Sanity check */
+ HDassert((size_t)(image - (uint8_t *)_image) <= len);
- /* Double check we didn't overrun the block (unlikely) */
- HDassert(superblock_size <= sizeof(buf));
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5F__cache_superblock_serialize() */
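Note (editorial, not part of this patch): for the version >= 2 branch above, the checksum is computed over the entire encoded image except its final four bytes (H5F_SIZEOF_CHKSUM) and then appended, so the image is self-verifying on read. A minimal standalone sketch of that layout follows; the checksum function and the little-endian store are toy stand-ins for H5_checksum_metadata() and UINT32ENCODE().

#include <stdint.h>
#include <stddef.h>

/* Toy stand-in for H5_checksum_metadata(); illustrative only */
static uint32_t toy_checksum(const uint8_t *buf, size_t len)
{
    uint32_t sum = 0;
    while(len--)
        sum = (sum << 5) + sum + *buf++;    /* djb2-style accumulator */
    return sum;
}

/* Checksum everything except the trailing 4 bytes, then store the checksum
 * there little-endian (as UINT32ENCODE does).  Caller guarantees
 * image_len >= sizeof(uint32_t), just as the cache guarantees a correctly
 * sized buffer above. */
static void seal_image(uint8_t *image, size_t image_len)
{
    uint32_t chksum = toy_checksum(image, image_len - sizeof(uint32_t));
    uint8_t *p = image + image_len - sizeof(uint32_t);

    *p++ = (uint8_t)(chksum & 0xff);
    *p++ = (uint8_t)((chksum >> 8) & 0xff);
    *p++ = (uint8_t)((chksum >> 16) & 0xff);
    *p   = (uint8_t)((chksum >> 24) & 0xff);
}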
- /* Get the DXPL plist object for DXPL ID */
- if(NULL == (dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+
+/*-------------------------------------------------------------------------
+ * Function: H5F__cache_superblock_free_icr
+ *
+ * Purpose: Destroy/release an "in core representation" of a data
+ * structure
+ *
+ * Note: The metadata cache sets the object's cache_info.magic to
+ * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr
+ * callback (checked in assert).
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * July 20, 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5F__cache_superblock_free_icr(void *_thing)
+{
+ H5F_super_t *sblock = (H5F_super_t *)_thing; /* Pointer to the object */
+ herr_t ret_value = SUCCEED; /* Return value */
- /* Write superblock */
- /* (always at relative address 0) */
- if(H5FD_write(f->shared->lf, dxpl, H5FD_MEM_SUPER, (haddr_t)0, superblock_size, buf) < 0)
- HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write superblock")
+ FUNC_ENTER_STATIC
- /* Check for newer version of superblock format & superblock extension */
- if(sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_2 && H5F_addr_defined(sblock->ext_addr)) {
- H5O_loc_t ext_loc; /* "Object location" for superblock extension */
+ /* Sanity check */
+ HDassert(sblock);
+ HDassert(sblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC);
+ HDassert(sblock->cache_info.type == H5AC_SUPERBLOCK);
- /* Open the superblock extension's object header */
- if(H5F_super_ext_open(f, sblock->ext_addr, &ext_loc) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTOPENOBJ, FAIL, "unable to open file's superblock extension")
+ /* Destroy superblock */
+ if(H5F__super_free(sblock) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTFREE, FAIL, "unable to free superblock")
- /* Check for ignoring the driver info for this file */
- if(!H5F_HAS_FEATURE(f, H5FD_FEAT_IGNORE_DRVRINFO)) {
- /* Check for driver info message */
- H5_CHECKED_ASSIGN(driver_size, size_t, H5FD_sb_size(f->shared->lf), hsize_t);
- if(driver_size > 0) {
- H5O_drvinfo_t drvinfo; /* Driver info */
- uint8_t dbuf[H5F_MAX_DRVINFOBLOCK_SIZE]; /* Driver info block encoding buffer */
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5F__cache_superblock_free_icr() */
- /* Sanity check */
- HDassert(driver_size <= H5F_MAX_DRVINFOBLOCK_SIZE);
+
+/*-------------------------------------------------------------------------
+ * Function: H5F__cache_drvrinfo_get_load_size
+ *
+ * Purpose: Compute the size of the data structure on disk.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * July 20, 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5F__cache_drvrinfo_get_load_size(const void H5_ATTR_UNUSED *udata, size_t *image_len)
+{
+ FUNC_ENTER_STATIC_NOERR
- /* Encode driver-specific data */
- if(H5FD_sb_encode(f->shared->lf, drvinfo.name, dbuf) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to encode driver information")
+ /* Check arguments */
+ HDassert(image_len);
- /* Write driver info information to the superblock extension */
- drvinfo.len = driver_size;
- drvinfo.buf = dbuf;
- if(H5O_msg_write(&ext_loc, H5O_DRVINFO_ID, H5O_MSG_FLAG_DONTSHARE, H5O_UPDATE_TIME, &drvinfo, dxpl_id) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "unable to update driver info header message")
+ /* Set the initial image length size */
+ *image_len = H5F_DRVINFOBLOCK_HDR_SIZE; /* Fixed size portion of driver info block */
- } /* end if */
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5F__cache_drvrinfo_get_load_size() */
- } /* end if */
+
+/*-------------------------------------------------------------------------
+ * Function: H5F__cache_drvrinfo_deserialize
+ *
+ * Purpose: Loads an object from the disk.
+ *
+ * Return: Success: Pointer to a new driver info message.
+ * Failure: NULL
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * July 20 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+static void *
+H5F__cache_drvrinfo_deserialize(const void *_image, size_t len, void *_udata,
+ hbool_t H5_ATTR_UNUSED *dirty)
+{
+ H5O_drvinfo_t *drvinfo = NULL; /* Driver info */
+ H5F_drvrinfo_cache_ud_t *udata = (H5F_drvrinfo_cache_ud_t *)_udata; /* User data */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ char drv_name[9]; /* Name of driver */
+ unsigned drv_vers; /* Version of driver info block */
+ H5O_drvinfo_t *ret_value; /* Return value */
- /* Close the superblock extension object header */
- if(H5F_super_ext_close(f, &ext_loc, dxpl_id, FALSE) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTCLOSEOBJ, FAIL, "unable to close file's superblock extension")
- } /* end if */
+ FUNC_ENTER_STATIC
+
+ /* Sanity check */
+ HDassert(image);
+ HDassert(len >= H5F_DRVINFOBLOCK_HDR_SIZE);
+ HDassert(udata);
+ HDassert(udata->f);
+
+ /* Allocate space for the driver info */
+ if(NULL == (drvinfo = (H5O_drvinfo_t *)H5MM_calloc(sizeof(H5O_drvinfo_t))))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTALLOC, NULL, "memory allocation failed for driver info message")
+
+ /* Version number */
+ drv_vers = *image++;
+ if(drv_vers != HDF5_DRIVERINFO_VERSION_0)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad driver information block version number")
+
+ image += 3; /* reserved bytes */
+
+ /* Driver info size */
+ UINT32DECODE(image, drvinfo->len);
+
+ /* Driver name and/or version */
+ HDstrncpy(drv_name, (const char *)image, (size_t)8);
+ drv_name[8] = '\0';
+ image += 8; /* advance past name/version */
+
+ /* Handle metadata cache retry for variable-sized portion of the driver info block */
+ if(len != (H5F_DRVINFOBLOCK_HDR_SIZE + drvinfo->len)) {
+ /* Sanity check */
+ HDassert(len == H5F_DRVINFOBLOCK_HDR_SIZE);
+
+ /* extend the eoa if required so that we can read the complete driver info block */
+ {
+ haddr_t eoa;
+ haddr_t min_eoa;
+
+ /* get current eoa... */
+ if ((eoa = H5FD_get_eoa(udata->f->shared->lf, H5FD_MEM_SUPER)) == HADDR_UNDEF)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, NULL, "driver get_eoa request failed")
+
+ /* ... if it is too small, extend it. */
+ min_eoa = udata->driver_addr + H5F_DRVINFOBLOCK_HDR_SIZE + drvinfo->len;
+
+ if ( H5F_addr_gt(min_eoa, eoa) )
+ if(H5FD_set_eoa(udata->f->shared->lf, H5FD_MEM_SUPER, min_eoa) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, \
+ "set end of space allocation request failed")
+ }
- /* Reset the dirty flag. */
- sblock->cache_info.is_dirty = FALSE;
+ } /* end if */
+ else {
+ /* Validate and decode driver information */
+ if(H5FD_sb_load(udata->f->shared->lf, drv_name, image) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, NULL, "unable to decode driver information")
} /* end if */
- if(destroy)
- if(H5F_sblock_dest(f, sblock) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_CLOSEERROR, FAIL, "can't close superblock")
+ /* Sanity check */
+ HDassert((size_t)(image - (const uint8_t *)_image) <= len);
+
+ /* Set return value */
+ ret_value = drvinfo;
done:
+ /* Release the [possibly partially initialized] driver info message on error */
+ if(!ret_value && drvinfo)
+ H5MM_xfree(drvinfo);
+
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5F_sblock_flush() */
+} /* end H5F__cache_drvrinfo_deserialize() */
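Note (editorial, not part of this patch): the driver info block has a variable-length tail whose size is only known after the fixed header has been decoded, so loading it is effectively a two-pass operation. get_load_size reports just H5F_DRVINFOBLOCK_HDR_SIZE, the deserialize above decodes drvinfo->len from that header-only image (extending the EOA if needed), and the image_len callback below then reports the full size so the cache can re-read and deserialize the complete block before H5FD_sb_load() runs. A self-contained sketch of that length logic, using illustrative names that are not HDF5 API:

#include <stddef.h>

#define TOY_HDR_SIZE 16u              /* stands in for H5F_DRVINFOBLOCK_HDR_SIZE */

typedef struct {
    size_t var_len;                   /* length decoded from the fixed header */
} toy_drvinfo_t;

/* Full on-disk size, known only once the fixed header has been decoded */
static size_t toy_full_len(const toy_drvinfo_t *info)
{
    return TOY_HDR_SIZE + info->var_len;
}

/* Nonzero if the image handed to deserialize is only the speculative,
 * header-sized first read (the retry case handled in the callback above). */
static int toy_needs_reread(const toy_drvinfo_t *info, size_t image_len)
{
    return image_len != toy_full_len(info) && image_len == TOY_HDR_SIZE;
}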
/*-------------------------------------------------------------------------
- * Function: H5F_sblock_dest
+ * Function: H5F__cache_drvrinfo_image_len
*
- * Purpose: Frees memory used by the superblock.
+ * Purpose: Compute the size of the data structure on disk.
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Mike McGreevy
- * April 8, 2009
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * July 20, 2013
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5F_sblock_dest(H5F_t H5_ATTR_UNUSED *f, H5F_super_t* sblock)
+H5F__cache_drvrinfo_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ const H5O_drvinfo_t *drvinfo = (const H5O_drvinfo_t *)_thing; /* Pointer to the object */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC_NOERR
- /* Sanity check */
- HDassert(sblock);
+ /* Check arguments */
+ HDassert(drvinfo);
+ HDassert(drvinfo->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(drvinfo->cache_info.type == H5AC_DRVRINFO);
+ HDassert(image_len);
+ HDassert(compressed_ptr);
- /* Free superblock */
- if(H5F__super_free(sblock) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTFREE, FAIL, "unable to destroy superblock")
+ /* Set the image length size */
+ *image_len = (size_t)(H5F_DRVINFOBLOCK_HDR_SIZE + /* Fixed-size portion of driver info block */
+ drvinfo->len); /* Variable-size portion of driver info block */
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5F_sblock_dest() */
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5F__cache_drvrinfo_image_len() */
/*-------------------------------------------------------------------------
- * Function: H5F_sblock_clear
+ * Function: H5F__cache_drvrinfo_serialize
*
- * Purpose: Mark the superblock as no longer being dirty.
+ * Purpose: Flushes a dirty object to disk.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Mike McGreevy
- * April 8, 2009
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * July 20 2013
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5F_sblock_clear(H5F_t *f, H5F_super_t *sblock, hbool_t destroy)
+H5F__cache_drvrinfo_serialize(const H5F_t *f, void *_image, size_t len,
+ void *_thing)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ H5O_drvinfo_t *drvinfo = (H5O_drvinfo_t *)_thing; /* Pointer to the object */
+ uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */
+ uint8_t *dbuf; /* Pointer to beginning of driver info */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
- /*
- * Check arguments.
- */
- HDassert(sblock);
+ /* check arguments */
+ HDassert(f);
+ HDassert(image);
+ HDassert(drvinfo);
+ HDassert(drvinfo->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(drvinfo->cache_info.type == H5AC_DRVRINFO);
+ HDassert(len == (size_t)(H5F_DRVINFOBLOCK_HDR_SIZE + drvinfo->len));
+
+ /* Save pointer to beginning of driver info */
+ dbuf = image;
+
+ /* Encode the driver information block */
+ *image++ = HDF5_DRIVERINFO_VERSION_0; /* Version */
+ *image++ = 0; /* reserved */
+ *image++ = 0; /* reserved */
+ *image++ = 0; /* reserved */
+
+ /* Driver info size, excluding header */
+ UINT32ENCODE(image, drvinfo->len);
- /* Reset the dirty flag. */
- sblock->cache_info.is_dirty = FALSE;
+ /* Encode driver-specific data */
+ if(H5FD_sb_encode(f->shared->lf, (char *)image, dbuf + H5F_DRVINFOBLOCK_HDR_SIZE) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to encode driver information")
- if(destroy)
- if(H5F_sblock_dest(f, sblock) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTFREE, FAIL, "unable to delete superblock")
+ /* Advance buffer pointer past name & variable-sized portion of driver info */
+ image += 8 + drvinfo->len;
+
+ /* Sanity check */
+ HDassert((size_t)(image - (uint8_t *)_image) <= len);
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5F_sblock_clear() */
+} /* H5F__cache_drvrinfo_serialize() */
/*-------------------------------------------------------------------------
- * Function: H5F_sblock_size
+ * Function: H5F__cache_drvrinfo_free_icr
*
- * Purpose: Returns the size of the superblock encoded on disk.
+ * Purpose: Destroy/release an "in core representation" of a data
+ * structure
*
- * Return: Non-negative on success/Negative on failure
+ * Note: The metadata cache sets the object's cache_info.magic to
+ * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr
+ * callback (checked in assert).
*
- * Programmer: Mike McGreevy
- * April 8, 2009
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * July 20, 2013
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5F_sblock_size(const H5F_t *f, const H5F_super_t *sblock, size_t *size_ptr)
+H5F__cache_drvrinfo_free_icr(void *_thing)
{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ H5O_drvinfo_t *drvinfo = (H5O_drvinfo_t *)_thing; /* Pointer to the object */
- /* check arguments */
- HDassert(f);
- HDassert(sblock);
- HDassert(size_ptr);
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check arguments */
+ HDassert(drvinfo);
+ HDassert(drvinfo->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC);
+ HDassert(drvinfo->cache_info.type == H5AC_DRVRINFO);
- /* Set size value */
- *size_ptr = (size_t)H5F_SUPERBLOCK_SIZE(sblock->super_vers, f);
+ /* Destroy driver info message */
+ H5MM_xfree(drvinfo);
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* end H5F_sblock_size() */
+} /* H5F__cache_drvrinfo_free_icr() */
diff --git a/src/H5Gcache.c b/src/H5Gcache.c
index bedceb6..8dfecb1 100644
--- a/src/H5Gcache.c
+++ b/src/H5Gcache.c
@@ -46,7 +46,6 @@
/****************/
#define H5G_NODE_VERS 1 /* Symbol table node version number */
-#define H5G_NODE_BUF_SIZE 512 /* Size of stack buffer for serialized nodes */
/******************/
@@ -64,12 +63,14 @@
/********************/
/* Metadata cache (H5AC) callbacks */
-static H5G_node_t *H5G_node_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5G_node_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
- H5G_node_t *sym, unsigned *flags_ptr);
-static herr_t H5G_node_dest(H5F_t *f, H5G_node_t *sym);
-static herr_t H5G_node_clear(H5F_t *f, H5G_node_t *sym, hbool_t destroy);
-static herr_t H5G_node_size(const H5F_t *f, const H5G_node_t *sym, size_t *size_ptr);
+static herr_t H5G__cache_node_get_load_size(const void *udata, size_t *image_len);
+static void *H5G__cache_node_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5G__cache_node_image_len(const void *thing, size_t *image_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static herr_t H5G__cache_node_serialize(const H5F_t *f, void *image,
+ size_t len, void *thing);
+static herr_t H5G__cache_node_free_icr(void *thing);
/*********************/
@@ -88,13 +89,19 @@ static herr_t H5G_node_size(const H5F_t *f, const H5G_node_t *sym, size_t *size_
/* Symbol table nodes inherit cache-like properties from H5AC */
const H5AC_class_t H5AC_SNODE[1] = {{
- H5AC_SNODE_ID,
- (H5AC_load_func_t)H5G_node_load,
- (H5AC_flush_func_t)H5G_node_flush,
- (H5AC_dest_func_t)H5G_node_dest,
- (H5AC_clear_func_t)H5G_node_clear,
- (H5AC_notify_func_t)NULL,
- (H5AC_size_func_t)H5G_node_size,
+ H5AC_SNODE_ID, /* Metadata client ID */
+ "Symbol table node", /* Metadata client name (for debugging) */
+ H5FD_MEM_BTREE, /* File space memory type for client */
+ H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
+ H5G__cache_node_get_load_size, /* 'get_load_size' callback */
+ H5G__cache_node_deserialize, /* 'deserialize' callback */
+ H5G__cache_node_image_len, /* 'image_len' callback */
+ NULL, /* 'pre_serialize' callback */
+ H5G__cache_node_serialize, /* 'serialize' callback */
+ NULL, /* 'notify' callback */
+ H5G__cache_node_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
@@ -106,42 +113,80 @@ H5FL_SEQ_EXTERN(H5G_entry_t);
/*-------------------------------------------------------------------------
- * Function: H5G_node_load
+ * Function: H5G__cache_node_get_load_size()
*
- * Purpose: Loads a symbol table node from the file.
+ * Purpose: Determine the size of the on disk image of the node, and
+ * return this value in *image_len.
*
- * Return: Success: Ptr to the new table.
+ * Note that this computation requires access to the file pointer,
+ * which is not provided in the parameter list for this callback.
+ * Finesse this issue by passing in the file pointer twice to the
+ * H5AC_protect() call -- once as the file pointer proper, and
+ * again as the user data.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Failure: NULL
- *
- * Programmer: Robb Matzke
- * matzke@llnl.gov
- * Jun 23 1997
+ * Programmer: John Mainzer
+ * 7/21/14
*
*-------------------------------------------------------------------------
*/
-static H5G_node_t *
-H5G_node_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata)
+static herr_t
+H5G__cache_node_get_load_size(const void *_udata, size_t *image_len)
{
- H5G_node_t *sym = NULL;
- H5WB_t *wb = NULL; /* Wrapped buffer for node data */
- uint8_t node_buf[H5G_NODE_BUF_SIZE]; /* Buffer for node */
- uint8_t *node; /* Pointer to node buffer */
- const uint8_t *p;
- H5G_node_t *ret_value; /*for error handling */
-
- FUNC_ENTER_NOAPI_NOINIT
-
- /*
- * Check arguments.
- */
+ const H5F_t *f = (const H5F_t *)_udata; /* User data for callback */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
HDassert(f);
- HDassert(H5F_addr_defined(addr));
- HDassert(udata);
+ HDassert(image_len);
+
+ /* report image length */
+ *image_len = (size_t)(H5G_NODE_SIZE(f));
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5G__cache_node_get_load_size() */
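Note (editorial, not part of this patch): the "file pointer passed twice" convention described in the header comment above looks like this at the call sites updated later in this patch (H5Gnode.c and related files). The fragment below is an illustrative sketch of the protect/unprotect pair, not new code in this diff; it assumes the usual FUNC_ENTER/done error-handling context around HGOTO_ERROR.

    H5G_node_t *sn;     /* protected symbol table node */

    /* f appears both as the file argument and as the user data, so the
     * get_load_size/deserialize callbacks can reach H5G_NODE_SIZE(f), etc. */
    if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG)))
        HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, FAIL, "unable to protect symbol table node")

    /* ... read-only use of sn->nsyms and sn->entry[] ... */

    if(H5AC_unprotect(f, dxpl_id, H5AC_SNODE, addr, sn, H5AC__NO_FLAGS_SET) < 0)
        HGOTO_ERROR(H5E_SYM, H5E_CANTUNPROTECT, FAIL, "unable to release symbol table node")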
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5G__cache_node_deserialize
+ *
+ * Purpose: Given a buffer containing the on disk image of a symbol table
+ * node, allocate an instance of H5G_node_t, load the contents of the
+ * image into it, and return a pointer to the instance.
+ *
+ * Note that deserializing the image requires access to the file
+ * pointer, which is not included in the parameter list for this
+ * callback. Finesse this issue by passing in the file pointer
+ * twice to the H5AC_protect() call -- once as the file pointer
+ * proper, and again as the user data.
+ *
+ * Return: Success: Pointer to in core representation
+ * Failure: NULL
+ *
+ * Programmer: John Mainzer
+ * 6/21/14
+ *
+ *-------------------------------------------------------------------------
+ */
+static void *
+H5G__cache_node_deserialize(const void *_image, size_t len, void *_udata,
+ hbool_t H5_ATTR_UNUSED *dirty)
+{
+ H5F_t *f = (H5F_t *)_udata; /* User data for callback */
+ H5G_node_t *sym = NULL; /* Symbol table node created */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer to image to deserialize */
+ void * ret_value; /* Return value */
- /*
- * Initialize variables.
- */
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(image);
+ HDassert(len > 0);
+ HDassert(f);
+ HDassert(dirty);
/* Allocate symbol table data structures */
if(NULL == (sym = H5FL_CALLOC(H5G_node_t)))
@@ -150,262 +195,175 @@ H5G_node_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata)
if(NULL == (sym->entry = H5FL_SEQ_CALLOC(H5G_entry_t, (size_t)(2 * H5F_SYM_LEAF_K(f)))))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
- /* Wrap the local buffer for serialized node info */
- if(NULL == (wb = H5WB_wrap(node_buf, sizeof(node_buf))))
- HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, NULL, "can't wrap buffer")
-
- /* Get a pointer to a buffer that's large enough for node */
- if(NULL == (node = (uint8_t *)H5WB_actual(wb, sym->node_size)))
- HGOTO_ERROR(H5E_SYM, H5E_NOSPACE, NULL, "can't get actual buffer")
-
- /* Read the serialized symbol table node. */
- if(H5F_block_read(f, H5FD_MEM_BTREE, addr, sym->node_size, dxpl_id, node) < 0)
- HGOTO_ERROR(H5E_SYM, H5E_READERROR, NULL, "unable to read symbol table node")
-
- /* Get temporary pointer to serialized node */
- p = node;
-
/* magic */
- if(HDmemcmp(p, H5G_NODE_MAGIC, (size_t)H5_SIZEOF_MAGIC))
- HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, NULL, "bad symbol table node signature")
- p += H5_SIZEOF_MAGIC;
+ if(HDmemcmp(image, H5G_NODE_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ HGOTO_ERROR(H5E_SYM, H5E_BADVALUE, NULL, "bad symbol table node signature")
+ image += H5_SIZEOF_MAGIC;
/* version */
- if(H5G_NODE_VERS != *p++)
- HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, NULL, "bad symbol table node version")
+ if(H5G_NODE_VERS != *image++)
+ HGOTO_ERROR(H5E_SYM, H5E_VERSION, NULL, "bad symbol table node version")
/* reserved */
- p++;
+ image++;
/* number of symbols */
- UINT16DECODE(p, sym->nsyms);
+ UINT16DECODE(image, sym->nsyms);
/* entries */
- if(H5G__ent_decode_vec(f, &p, sym->entry, sym->nsyms) < 0)
+ if(H5G__ent_decode_vec(f, &image, sym->entry, sym->nsyms) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, NULL, "unable to decode symbol table entries")
/* Set return value */
ret_value = sym;
done:
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- HDONE_ERROR(H5E_SYM, H5E_CLOSEERROR, NULL, "can't close wrapped buffer")
if(!ret_value)
if(sym && H5G__node_free(sym) < 0)
HDONE_ERROR(H5E_SYM, H5E_CANTFREE, NULL, "unable to destroy symbol table node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5G_node_load() */
+} /* end H5G__cache_node_deserialize() */
/*-------------------------------------------------------------------------
- * Function: H5G_node_flush
+ * Function: H5G__cache_node_image_len
*
- * Purpose: Flush a symbol table node to disk.
+ * Purpose: Compute the size of the data structure on disk and return
+ * it in *image_len.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: Robb Matzke
- * matzke@llnl.gov
- * Jun 23 1997
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5G_node_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5G_node_t *sym, unsigned H5_ATTR_UNUSED * flags_ptr)
+H5G__cache_node_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- H5WB_t *wb = NULL; /* Wrapped buffer for node data */
- uint8_t node_buf[H5G_NODE_BUF_SIZE]; /* Buffer for node */
- herr_t ret_value = SUCCEED; /* Return value */
+ const H5G_node_t *sym = (const H5G_node_t *)_thing; /* Pointer to object */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC_NOERR
- /*
- * Check arguments.
- */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ /* Sanity checks */
HDassert(sym);
+ HDassert(sym->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(sym->cache_info.type == H5AC_SNODE);
+ HDassert(image_len);
- /*
- * Write the symbol node to disk.
- */
- if(sym->cache_info.is_dirty) {
- uint8_t *node; /* Pointer to node buffer */
- uint8_t *p; /* Pointer into raw data buffer */
-
- /* Wrap the local buffer for serialized node info */
- if(NULL == (wb = H5WB_wrap(node_buf, sizeof(node_buf))))
- HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "can't wrap buffer")
-
- /* Get a pointer to a buffer that's large enough for node */
- if(NULL == (node = (uint8_t *)H5WB_actual(wb, sym->node_size)))
- HGOTO_ERROR(H5E_SYM, H5E_NOSPACE, FAIL, "can't get actual buffer")
-
- /* Get temporary pointer to serialized symbol table node */
- p = node;
-
- /* magic number */
- HDmemcpy(p, H5G_NODE_MAGIC, (size_t)H5_SIZEOF_MAGIC);
- p += H5_SIZEOF_MAGIC;
-
- /* version number */
- *p++ = H5G_NODE_VERS;
-
- /* reserved */
- *p++ = 0;
-
- /* number of symbols */
- UINT16ENCODE(p, sym->nsyms);
-
- /* entries */
- if(H5G__ent_encode_vec(f, &p, sym->entry, sym->nsyms) < 0)
- HGOTO_ERROR(H5E_SYM, H5E_CANTENCODE, FAIL, "can't serialize")
- HDmemset(p, 0, sym->node_size - (size_t)(p - node));
-
- /* Write the serialized symbol table node. */
- if(H5F_block_write(f, H5FD_MEM_BTREE, addr, sym->node_size, dxpl_id, node) < 0)
- HGOTO_ERROR(H5E_SYM, H5E_WRITEERROR, FAIL, "unable to write symbol table node to the file")
-
- /* Reset the node's dirty flag */
- sym->cache_info.is_dirty = FALSE;
- } /* end if */
+ *image_len = sym->node_size;
- /*
- * Destroy the symbol node? This might happen if the node is being
- * preempted from the cache.
- */
- if(destroy)
- if(H5G_node_dest(f, sym) < 0)
- HGOTO_ERROR(H5E_SYM, H5E_CANTFREE, FAIL, "unable to destroy symbol table node")
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5G__cache_node_image_len() */
-done:
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- HDONE_ERROR(H5E_SYM, H5E_CLOSEERROR, FAIL, "can't close wrapped buffer")
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5G_node_flush() */
+/*************************************/
+/* no H5G__cache_node_pre_serialize() */
+/*************************************/
/*-------------------------------------------------------------------------
- * Function: H5G_node_dest
+ * Function: H5G__cache_node_serialize
*
- * Purpose: Destroy a symbol table node in memory.
+ * Purpose: Given a correctly sized buffer and an instance of H5G_node_t,
+ * serialize the contents of the instance of H5G_node_t, and write
+ * this data into the supplied buffer. This buffer will be written
+ * to disk.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Jan 15 2003
+ * Programmer: John Mainzer
+ * 7/21/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5G_node_dest(H5F_t *f, H5G_node_t *sym)
+H5G__cache_node_serialize(const H5F_t *f, void *_image, size_t len,
+ void *_thing)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ H5G_node_t *sym = (H5G_node_t *)_thing; /* Pointer to object */
+ uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
- /*
- * Check arguments.
- */
+ /* Sanity checks */
HDassert(f);
+ HDassert(image);
HDassert(sym);
+ HDassert(sym->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(sym->cache_info.type == H5AC_SNODE);
+ HDassert(len == sym->node_size);
- /* Verify that node is clean */
- HDassert(sym->cache_info.is_dirty == FALSE);
+ /* magic number */
+ HDmemcpy(image, H5G_NODE_MAGIC, (size_t)H5_SIZEOF_MAGIC);
+ image += H5_SIZEOF_MAGIC;
- /* If we're going to free the space on disk, the address must be valid */
- HDassert(!sym->cache_info.free_file_space_on_destroy || H5F_addr_defined(sym->cache_info.addr));
+ /* version number */
+ *image++ = H5G_NODE_VERS;
- /* Check for freeing file space for symbol table node */
- if(sym->cache_info.free_file_space_on_destroy) {
- /* Release the space on disk */
- /* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_BTREE, H5AC_dxpl_id, sym->cache_info.addr, (hsize_t)sym->node_size) < 0)
- HGOTO_ERROR(H5E_SYM, H5E_CANTFREE, FAIL, "unable to free symbol table node")
- } /* end if */
+ /* reserved */
+ *image++ = 0;
- /* Destroy symbol table node */
- if(H5G__node_free(sym) < 0)
- HGOTO_ERROR(H5E_SYM, H5E_CANTFREE, FAIL, "unable to destroy symbol table node")
+ /* number of symbols */
+ UINT16ENCODE(image, sym->nsyms);
+
+ /* entries */
+ if(H5G__ent_encode_vec(f, &image, sym->entry, sym->nsyms) < 0)
+ HGOTO_ERROR(H5E_SYM, H5E_CANTENCODE, FAIL, "can't serialize")
+
+ /* Clear rest of symbol table node */
+ HDmemset(image, 0, sym->node_size - (size_t)(image - (uint8_t *)_image));
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5G_node_dest() */
+} /* end H5G__cache_node_serialize() */
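Note (editorial, not part of this patch): the on-disk node image is a fixed sym->node_size bytes while only nsyms entries are live, so the serialize above zero-fills whatever the encoder did not write. A standalone sketch of that encode-then-pad pattern, with illustrative names that are not HDF5 API:

#include <stdint.h>
#include <string.h>

/* Encode 'used' payload bytes into a fixed-size image and zero the rest,
 * mirroring the HDmemset() at the end of the serialize callback above. */
static void toy_encode_padded(uint8_t *image, size_t image_size,
                              const uint8_t *payload, size_t used)
{
    /* caller guarantees used <= image_size, just as the cache guarantees
     * len == sym->node_size in the callback above */
    memcpy(image, payload, used);
    memset(image + used, 0, image_size - used);
}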
+
+
+/***************************************/
+/* no H5G__cache_node_notify() function */
+/***************************************/
/*-------------------------------------------------------------------------
- * Function: H5G_node_clear
+ * Function: H5G__cache_node_free_icr
*
- * Purpose: Mark a symbol table node in memory as non-dirty.
+ * Purpose: Destroys a symbol table node in memory.
*
- * Return: Non-negative on success/Negative on failure
+ * Note: The metadata cache sets the object's cache_info.magic to
+ * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr
+ * callback (checked in assert).
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Mar 20 2003
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5G_node_clear(H5F_t *f, H5G_node_t *sym, hbool_t destroy)
+H5G__cache_node_free_icr(void *_thing)
{
- herr_t ret_value = SUCCEED;
+ H5G_node_t *sym = (H5G_node_t *)_thing; /* Pointer to the object */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
- /*
- * Check arguments.
- */
+ /* Sanity checks */
HDassert(sym);
+ HDassert(sym->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC);
+ HDassert(sym->cache_info.type == H5AC_SNODE);
- /* Reset the node's dirty flag */
- sym->cache_info.is_dirty = FALSE;
-
- /*
- * Destroy the symbol node? This might happen if the node is being
- * preempted from the cache.
- */
- if(destroy)
- if(H5G_node_dest(f, sym) < 0)
- HGOTO_ERROR(H5E_SYM, H5E_CANTFREE, FAIL, "unable to destroy symbol table node")
+ /* Destroy symbol table node */
+ if(H5G__node_free(sym) < 0)
+ HGOTO_ERROR(H5E_SYM, H5E_CANTFREE, FAIL, "unable to destroy symbol table node")
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5G_node_clear() */
-
-
-/*-------------------------------------------------------------------------
- * Function: H5G_node_size
- *
- * Purpose: Compute the size in bytes of the specified instance of
- * H5G_node_t on disk, and return it in *size_ptr. On failure
- * the value of size_ptr is undefined.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 5/13/04
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5G_node_size(const H5F_t H5_ATTR_UNUSED *f, const H5G_node_t *sym, size_t *size_ptr)
-{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- /*
- * Check arguments.
- */
- HDassert(f);
- HDassert(size_ptr);
-
- *size_ptr = sym->node_size;
-
- FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5G_node_size() */
+} /* end H5G__cache_node_free_icr() */
diff --git a/src/H5Gent.c b/src/H5Gent.c
index 3f243de..3809933 100644
--- a/src/H5Gent.c
+++ b/src/H5Gent.c
@@ -460,7 +460,7 @@ H5G__ent_convert(H5F_t *f, hid_t dxpl_id, H5HL_t *heap, const char *name,
targ_oloc.addr = lnk->u.hard.addr;
/* Get the object header */
- if(NULL == (oh = H5O_protect(&targ_oloc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(&targ_oloc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, FAIL, "unable to protect target object header")
/* Check if a symbol table message exists */
diff --git a/src/H5Gnode.c b/src/H5Gnode.c
index d695dac..cbb9a8e 100644
--- a/src/H5Gnode.c
+++ b/src/H5Gnode.c
@@ -541,7 +541,7 @@ H5G_node_found(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void H5_ATTR_UNUSED
/*
* Load the symbol table node for exclusive access.
*/
- if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_READ)))
+ if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, FAIL, "unable to protect symbol table node")
/* Get base address of heap */
@@ -647,7 +647,7 @@ H5G_node_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr,
/*
* Load the symbol node.
*/
- if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_WRITE)))
+ if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5B_INS_ERROR, "unable to protect symbol table node")
/* Get base address of heap */
@@ -691,7 +691,7 @@ H5G_node_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr,
if(H5G_node_create(f, dxpl_id, H5B_INS_FIRST, NULL, NULL, NULL, new_node_p/*out*/) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, H5B_INS_ERROR, "unable to split symbol table node")
- if(NULL == (snrt = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, *new_node_p, f, H5AC_WRITE)))
+ if(NULL == (snrt = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, *new_node_p, f, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5B_INS_ERROR, "unable to split symbol table node")
HDmemcpy(snrt->entry, sn->entry + H5F_SYM_LEAF_K(f),
@@ -808,7 +808,7 @@ H5G_node_remove(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key/*in,out*/,
HDassert(udata && udata->common.heap);
/* Load the symbol table */
- if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_WRITE)))
+ if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5B_INS_ERROR, "unable to protect symbol table node")
/* "Normal" removal of a single entry from the symbol table node */
@@ -1001,7 +1001,7 @@ H5G__node_iterate(H5F_t *f, hid_t dxpl_id, const void H5_ATTR_UNUSED *_lt_key, h
HDassert(udata && udata->heap);
/* Protect the symbol table node & local heap while we iterate over entries */
- if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_READ)))
+ if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load symbol table node")
/*
@@ -1078,7 +1078,7 @@ H5G__node_sumup(H5F_t *f, hid_t dxpl_id, const void H5_ATTR_UNUSED *_lt_key, had
HDassert(num_objs);
/* Find the object node and add the number of symbol entries. */
- if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_READ)))
+ if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load symbol table node")
*num_objs += sn->nsyms;
@@ -1123,7 +1123,7 @@ H5G__node_by_idx(H5F_t *f, hid_t dxpl_id, const void H5_ATTR_UNUSED *_lt_key, ha
HDassert(udata);
/* Get a pointer to the symbol table node */
- if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_READ)))
+ if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load symbol table node");
/* Find the node, locate the object symbol table entry and retrieve the name */
@@ -1261,11 +1261,11 @@ H5G__node_copy(H5F_t *f, hid_t dxpl_id, const void H5_ATTR_UNUSED *_lt_key, hadd
HDassert(udata);
/* load the symbol table into memory from the source file */
- if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_READ)))
+ if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load symbol table node")
/* get the base address of the heap */
- if(NULL == (heap = H5HL_protect(f, dxpl_id, udata->src_heap_addr, H5AC_READ)))
+ if(NULL == (heap = H5HL_protect(f, dxpl_id, udata->src_heap_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, H5_ITER_ERROR, "unable to protect symbol name")
/* copy object in this node one by one */
@@ -1420,7 +1420,7 @@ H5G__node_build_table(H5F_t *f, hid_t dxpl_id, const void H5_ATTR_UNUSED *_lt_ke
* Save information about the symbol table node since we can't lock it
* because we're about to call an application function.
*/
- if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_READ)))
+ if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load symbol table node")
/* Check if the link table needs to be extended */
@@ -1527,14 +1527,14 @@ H5G_node_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream, int indent,
/* Pin the heap down in memory */
if(heap_addr > 0 && H5F_addr_defined(heap_addr))
- if(NULL == (heap = H5HL_protect(f, dxpl_id, heap_addr, H5AC_READ)))
+ if(NULL == (heap = H5HL_protect(f, dxpl_id, heap_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, FAIL, "unable to protect symbol table heap")
/*
* If we couldn't load the symbol table node, then try loading the
* B-tree node.
*/
- if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_READ))) {
+ if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG))) {
H5G_bt_common_t udata; /*data to pass through B-tree */
H5E_clear_stack(NULL); /* discard that error */
diff --git a/src/H5Gstab.c b/src/H5Gstab.c
index 4c338c9..08188ae 100644
--- a/src/H5Gstab.c
+++ b/src/H5Gstab.c
@@ -160,7 +160,7 @@ H5G__stab_create_components(H5F_t *f, H5O_stab_t *stab, size_t size_hint, hid_t
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "can't create heap")
/* Pin the heap down in memory */
- if(NULL == (heap = H5HL_protect(f, dxpl_id, stab->heap_addr, H5AC_WRITE)))
+ if(NULL == (heap = H5HL_protect(f, dxpl_id, stab->heap_addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap")
/* Insert name into the heap */
@@ -276,7 +276,7 @@ H5G__stab_insert_real(H5F_t *f, const H5O_stab_t *stab, const char *name,
HDassert(obj_lnk);
/* Pin the heap down in memory */
- if(NULL == (heap = H5HL_protect(f, dxpl_id, stab->heap_addr, H5AC_WRITE)))
+ if(NULL == (heap = H5HL_protect(f, dxpl_id, stab->heap_addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap")
/* Initialize data to pass through B-tree */
@@ -373,7 +373,7 @@ H5G__stab_remove(const H5O_loc_t *loc, hid_t dxpl_id, H5RS_str_t *grp_full_path_
HGOTO_ERROR(H5E_SYM, H5E_BADMESG, FAIL, "not a symbol table")
/* Pin the heap down in memory */
- if(NULL == (heap = H5HL_protect(loc->file, dxpl_id, stab.heap_addr, H5AC_WRITE)))
+ if(NULL == (heap = H5HL_protect(loc->file, dxpl_id, stab.heap_addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap")
/* Initialize data to pass through B-tree */
@@ -431,7 +431,7 @@ H5G__stab_remove_by_idx(const H5O_loc_t *grp_oloc, hid_t dxpl_id, H5RS_str_t *gr
HGOTO_ERROR(H5E_SYM, H5E_BADMESG, FAIL, "not a symbol table")
/* Pin the heap down in memory */
- if(NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, stab.heap_addr, H5AC_WRITE)))
+ if(NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, stab.heap_addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap")
/* Initialize data to pass through B-tree */
@@ -485,7 +485,7 @@ H5G__stab_delete(H5F_t *f, hid_t dxpl_id, const H5O_stab_t *stab)
HDassert(H5F_addr_defined(stab->heap_addr));
/* Pin the heap down in memory */
- if(NULL == (heap = H5HL_protect(f, dxpl_id, stab->heap_addr, H5AC_WRITE)))
+ if(NULL == (heap = H5HL_protect(f, dxpl_id, stab->heap_addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap")
/* Set up user data for B-tree deletion */
@@ -546,7 +546,7 @@ H5G__stab_iterate(const H5O_loc_t *oloc, hid_t dxpl_id, H5_iter_order_t order,
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "unable to determine local heap address")
/* Pin the heap down in memory */
- if(NULL == (heap = H5HL_protect(oloc->file, dxpl_id, stab.heap_addr, H5AC_READ)))
+ if(NULL == (heap = H5HL_protect(oloc->file, dxpl_id, stab.heap_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap")
/* Check on iteration order */
@@ -766,7 +766,7 @@ H5G__stab_get_name_by_idx(const H5O_loc_t *oloc, H5_iter_order_t order, hsize_t
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "unable to determine local heap address")
/* Pin the heap down in memory */
- if(NULL == (heap = H5HL_protect(oloc->file, dxpl_id, stab.heap_addr, H5AC_READ)))
+ if(NULL == (heap = H5HL_protect(oloc->file, dxpl_id, stab.heap_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap")
/* Remap index for decreasing iteration order */
@@ -888,7 +888,7 @@ H5G__stab_lookup(const H5O_loc_t *grp_oloc, const char *name, H5O_link_t *lnk,
HGOTO_ERROR(H5E_SYM, H5E_BADMESG, FAIL, "can't read message")
/* Pin the heap down in memory */
- if(NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, stab.heap_addr, H5AC_READ)))
+ if(NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, stab.heap_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap")
/* Set up user data to pass to 'find' operation callback */
@@ -989,7 +989,7 @@ H5G__stab_lookup_by_idx(const H5O_loc_t *grp_oloc, H5_iter_order_t order, hsize_
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "unable to determine local heap address")
/* Pin the heap down in memory */
- if(NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, stab.heap_addr, H5AC_READ)))
+ if(NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, stab.heap_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap")
/* Remap index for decreasing iteration order */
@@ -1081,10 +1081,10 @@ H5G__stab_valid(H5O_loc_t *grp_oloc, hid_t dxpl_id, H5O_stab_t *alt_stab)
} /* end if */
/* Check if the symbol table message's heap address is valid */
- if(NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, stab.heap_addr, H5AC_READ))) {
+ if(NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, stab.heap_addr, H5AC__READ_ONLY_FLAG))) {
/* Address is invalid, try the heap address in the alternate symbol
* table message */
- if(!alt_stab || NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, alt_stab->heap_addr, H5AC_READ)))
+ if(!alt_stab || NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, alt_stab->heap_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_NOTFOUND, FAIL, "unable to locate heap")
else {
/* The alternate symbol table's heap address is valid. Adjust the
diff --git a/src/H5Gtest.c b/src/H5Gtest.c
index 89b4a37..1c0adea 100644
--- a/src/H5Gtest.c
+++ b/src/H5Gtest.c
@@ -637,7 +637,7 @@ H5G__verify_cached_stab_test(H5O_loc_t *grp_oloc, H5G_entry_t *ent)
HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "b-tree address is invalid")
/* Verify that the heap address is valid */
- if(NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, stab.heap_addr, H5AC_READ)))
+ if(NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, stab.heap_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_NOTFOUND, FAIL, "heap address is invalid")
done:
@@ -686,7 +686,7 @@ H5G_verify_cached_stabs_test_cb(H5F_t *f, hid_t dxpl_id,
HDassert(H5F_addr_defined(addr));
/* Load the node */
- if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_READ)))
+ if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load symbol table node")
/* Check each target object to see if its stab message (if present) matches
@@ -701,7 +701,7 @@ H5G_verify_cached_stabs_test_cb(H5F_t *f, hid_t dxpl_id,
targ_oloc.addr = sn->entry[i].header;
/* Load target object header */
- if(NULL == (targ_oh = H5O_protect(&targ_oloc, dxpl_id, H5AC_READ)))
+ if(NULL == (targ_oh = H5O_protect(&targ_oloc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, H5_ITER_ERROR, "unable to protect target object header")
/* Check if a symbol table message exists */
diff --git a/src/H5HF.c b/src/H5HF.c
index f57c5e2..0464da4 100644
--- a/src/H5HF.c
+++ b/src/H5HF.c
@@ -172,7 +172,7 @@ H5HF_create(H5F_t *f, hid_t dxpl_id, const H5HF_create_t *cparam)
HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, NULL, "memory allocation failed for fractal heap info")
/* Lock the heap header into memory */
- if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, fh_addr, H5AC_WRITE)))
+ if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, fh_addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect fractal heap header")
/* Point fractal heap wrapper at header and bump it's ref count */
@@ -231,7 +231,7 @@ H5HF_open(H5F_t *f, hid_t dxpl_id, haddr_t fh_addr)
HDassert(H5F_addr_defined(fh_addr));
/* Load the heap header into memory */
- if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, fh_addr, H5AC_READ)))
+ if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, fh_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect fractal heap header")
/* Check for pending heap deletion */
@@ -821,7 +821,7 @@ H5HF_close(H5HF_t *fh, hid_t dxpl_id)
H5HF_hdr_t *hdr; /* Another pointer to fractal heap header */
/* Lock the heap header into memory */
- if(NULL == (hdr = H5HF_hdr_protect(fh->f, dxpl_id, heap_addr, H5AC_WRITE)))
+ if(NULL == (hdr = H5HF_hdr_protect(fh->f, dxpl_id, heap_addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap header")
/* Delete heap, starting with header (unprotects header) */
@@ -865,7 +865,7 @@ H5HF_delete(H5F_t *f, hid_t dxpl_id, haddr_t fh_addr)
HDassert(H5F_addr_defined(fh_addr));
/* Lock the heap header into memory */
- if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, fh_addr, H5AC_WRITE)))
+ if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, fh_addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap header")
/* Check for files using shared heap header */
diff --git a/src/H5HFcache.c b/src/H5HFcache.c
index 07e5b36..ec8d9a6 100644
--- a/src/H5HFcache.c
+++ b/src/H5HFcache.c
@@ -53,12 +53,6 @@
#define H5HF_DBLOCK_VERSION 0 /* Direct block */
#define H5HF_IBLOCK_VERSION 0 /* Indirect block */
-/* Size of stack buffer for serialized headers */
-#define H5HF_HDR_BUF_SIZE 512
-
-/* Size of stack buffer for serialized indirect blocks */
-#define H5HF_IBLOCK_BUF_SIZE 4096
-
/******************/
/* Local Typedefs */
@@ -79,24 +73,48 @@ static herr_t H5HF__dtable_encode(H5F_t *f, uint8_t **pp, const H5HF_dtable_t *d
static herr_t H5HF__dtable_decode(H5F_t *f, const uint8_t **pp, H5HF_dtable_t *dtable);
/* Metadata cache (H5AC) callbacks */
-static H5HF_hdr_t *H5HF_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5HF_cache_hdr_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5HF_hdr_t *hdr, unsigned H5_ATTR_UNUSED * flags_ptr);
-static herr_t H5HF_cache_hdr_dest(H5F_t *f, H5HF_hdr_t *hdr);
-static herr_t H5HF_cache_hdr_clear(H5F_t *f, H5HF_hdr_t *hdr, hbool_t destroy);
-static herr_t H5HF_cache_hdr_size(const H5F_t *f, const H5HF_hdr_t *hdr, size_t *size_ptr);
-static H5HF_indirect_t *H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5HF_cache_iblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5HF_indirect_t *iblock, unsigned H5_ATTR_UNUSED * flags_ptr);
-static herr_t H5HF_cache_iblock_dest(H5F_t *f, H5HF_indirect_t *iblock);
-static herr_t H5HF_cache_iblock_clear(H5F_t *f, H5HF_indirect_t *iblock, hbool_t destroy);
-static herr_t H5HF_cache_iblock_notify(H5C_notify_action_t action, H5HF_indirect_t *iblock);
-static herr_t H5HF_cache_iblock_size(const H5F_t *f, const H5HF_indirect_t *iblock, size_t *size_ptr);
-static H5HF_direct_t *H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5HF_cache_dblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5HF_direct_t *dblock, unsigned H5_ATTR_UNUSED * flags_ptr);
-static herr_t H5HF_cache_dblock_dest(H5F_t *f, H5HF_direct_t *dblock);
-static herr_t H5HF_cache_dblock_clear(H5F_t *f, H5HF_direct_t *dblock, hbool_t destroy);
-static herr_t H5HF_cache_dblock_notify(H5C_notify_action_t action, H5HF_direct_t *dblock);
-static herr_t H5HF_cache_dblock_size(const H5F_t *f, const H5HF_direct_t *dblock, size_t *size_ptr);
-
+static herr_t H5HF__cache_hdr_get_load_size(const void *udata, size_t *image_len);
+static void *H5HF__cache_hdr_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5HF__cache_hdr_image_len(const void *thing, size_t *image_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static herr_t H5HF__cache_hdr_pre_serialize(const H5F_t *f, hid_t dxpl_id,
+ void *thing, haddr_t addr, size_t len, size_t compressed_len,
+ haddr_t *new_addr, size_t *new_len, size_t *new_compressed_len,
+ unsigned *flags);
+static herr_t H5HF__cache_hdr_serialize(const H5F_t *f, void *image,
+ size_t len, void *thing);
+static herr_t H5HF__cache_hdr_free_icr(void *thing);
+
+static herr_t H5HF__cache_iblock_get_load_size(const void *udata, size_t *image_len);
+static void *H5HF__cache_iblock_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5HF__cache_iblock_image_len(const void *thing,
+ size_t *image_len, hbool_t *compressed_ptr,
+ size_t *compressed_image_len_ptr);
+static herr_t H5HF__cache_iblock_pre_serialize(const H5F_t *f, hid_t dxpl_id,
+ void *thing, haddr_t addr, size_t len, size_t compressed_len,
+ haddr_t *new_addr, size_t *new_len, size_t *new_compressed_len,
+ unsigned *flags);
+static herr_t H5HF__cache_iblock_serialize(const H5F_t *f, void *image,
+ size_t len, void *thing);
+static herr_t H5HF__cache_iblock_notify(H5C_notify_action_t action, void *thing);
+static herr_t H5HF__cache_iblock_free_icr(void *thing);
+
+static herr_t H5HF__cache_dblock_get_load_size(const void *udata, size_t *image_len);
+static void *H5HF__cache_dblock_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5HF__cache_dblock_image_len(const void *thing,
+ size_t *image_len, hbool_t *compressed_ptr,
+ size_t *compressed_image_len_ptr);
+static herr_t H5HF__cache_dblock_pre_serialize(const H5F_t *f, hid_t dxpl_id,
+ void *thing, haddr_t addr, size_t len, size_t compressed_len,
+ haddr_t *new_addr, size_t *new_len, size_t *new_compressed_len,
+ unsigned *flags);
+static herr_t H5HF__cache_dblock_serialize(const H5F_t *f, void *image,
+ size_t len, void *thing);
+static herr_t H5HF__cache_dblock_notify(H5C_notify_action_t action, void *thing);
+static herr_t H5HF__cache_dblock_free_icr(void *thing);
/* Debugging Function Prototypes */
#ifndef NDEBUG
@@ -117,35 +135,53 @@ static herr_t H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, hid_t dxpl_i
/* H5HF header inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_FHEAP_HDR[1] = {{
- H5AC_FHEAP_HDR_ID,
- (H5AC_load_func_t)H5HF_cache_hdr_load,
- (H5AC_flush_func_t)H5HF_cache_hdr_flush,
- (H5AC_dest_func_t)H5HF_cache_hdr_dest,
- (H5AC_clear_func_t)H5HF_cache_hdr_clear,
- (H5AC_notify_func_t)NULL,
- (H5AC_size_func_t)H5HF_cache_hdr_size,
+ H5AC_FHEAP_HDR_ID, /* Metadata client ID */
+ "fractal heap header", /* Metadata client name (for debugging) */
+ H5FD_MEM_FHEAP_HDR, /* File space memory type for client */
+ H5AC__CLASS_SPECULATIVE_LOAD_FLAG, /* Client class behavior flags */
+ H5HF__cache_hdr_get_load_size, /* 'get_load_size' callback */
+ H5HF__cache_hdr_deserialize, /* 'deserialize' callback */
+ H5HF__cache_hdr_image_len, /* 'image_len' callback */
+ H5HF__cache_hdr_pre_serialize, /* 'pre_serialize' callback */
+ H5HF__cache_hdr_serialize, /* 'serialize' callback */
+ NULL, /* 'notify' callback */
+ H5HF__cache_hdr_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
/* H5HF indirect block inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_FHEAP_IBLOCK[1] = {{
- H5AC_FHEAP_IBLOCK_ID,
- (H5AC_load_func_t)H5HF_cache_iblock_load,
- (H5AC_flush_func_t)H5HF_cache_iblock_flush,
- (H5AC_dest_func_t)H5HF_cache_iblock_dest,
- (H5AC_clear_func_t)H5HF_cache_iblock_clear,
- (H5AC_notify_func_t)H5HF_cache_iblock_notify,
- (H5AC_size_func_t)H5HF_cache_iblock_size,
+ H5AC_FHEAP_IBLOCK_ID, /* Metadata client ID */
+ "fractal heap indirect block", /* Metadata client name (for debugging) */
+ H5FD_MEM_FHEAP_IBLOCK, /* File space memory type for client */
+ H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
+ H5HF__cache_iblock_get_load_size, /* 'get_load_size' callback */
+ H5HF__cache_iblock_deserialize, /* 'deserialize' callback */
+ H5HF__cache_iblock_image_len, /* 'image_len' callback */
+ H5HF__cache_iblock_pre_serialize, /* 'pre_serialize' callback */
+ H5HF__cache_iblock_serialize, /* 'serialize' callback */
+ H5HF__cache_iblock_notify, /* 'notify' callback */
+ H5HF__cache_iblock_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
/* H5HF direct block inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_FHEAP_DBLOCK[1] = {{
- H5AC_FHEAP_DBLOCK_ID,
- (H5AC_load_func_t)H5HF_cache_dblock_load,
- (H5AC_flush_func_t)H5HF_cache_dblock_flush,
- (H5AC_dest_func_t)H5HF_cache_dblock_dest,
- (H5AC_clear_func_t)H5HF_cache_dblock_clear,
- (H5AC_notify_func_t)H5HF_cache_dblock_notify,
- (H5AC_size_func_t)H5HF_cache_dblock_size,
+ H5AC_FHEAP_DBLOCK_ID, /* Metadata client ID */
+ "fractal heap direct block", /* Metadata client name (for debugging) */
+ H5FD_MEM_FHEAP_DBLOCK, /* File space memory type for client */
+ H5C__CLASS_COMPRESSED_FLAG, /* Client class behavior flags */
+ H5HF__cache_dblock_get_load_size, /* 'get_load_size' callback */
+ H5HF__cache_dblock_deserialize, /* 'deserialize' callback */
+ H5HF__cache_dblock_image_len, /* 'image_len' callback */
+ H5HF__cache_dblock_pre_serialize, /* 'pre_serialize' callback */
+ H5HF__cache_dblock_serialize, /* 'serialize' callback */
+ H5HF__cache_dblock_notify, /* 'notify' callback */
+ H5HF__cache_dblock_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
@@ -268,119 +304,191 @@ H5HF__dtable_encode(H5F_t *f, uint8_t **pp, const H5HF_dtable_t *dtable)
/*-------------------------------------------------------------------------
- * Function: H5HF_cache_hdr_load
+ * Function: H5HF__cache_hdr_get_load_size()
*
- * Purpose: Loads a fractal heap header from the disk.
+ * Purpose: Determine the size of the fractal heap header on disk,
+ * and set *image_len to this value.
*
- * Return: Success: Pointer to a new fractal heap
+ * This code is based on the old H5HF_cache_hdr_load() routine
+ * that was used with the version 2 metadata cache. Note the
+ * use of a dummy header to compute the on disk size of the header.
+ *
+ * Note also that the value returned by this function presumes that
+ * there is no I/O filtering data in the header. If there is, the
+ * size reported will be too small, and H5C_load_entry()
+ * will have to make two tries to load the fractal heap header.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 6/21/14
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF__cache_hdr_get_load_size(const void *_udata, size_t *image_len)
+{
+ const H5HF_hdr_cache_ud_t *udata = (const H5HF_hdr_cache_ud_t *)_udata; /* pointer to user data */
+ H5HF_hdr_t dummy_hdr; /* dummy header -- to compute size */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(udata);
+ HDassert(image_len);
+
+ /* Set the internal parameters for the heap */
+ dummy_hdr.f = udata->f;
+ dummy_hdr.sizeof_size = H5F_SIZEOF_SIZE(udata->f);
+ dummy_hdr.sizeof_addr = H5F_SIZEOF_ADDR(udata->f);
+
+ /* Compute the 'base' size of the fractal heap header on disk */
+ *image_len = (size_t)H5HF_HEADER_SIZE(&dummy_hdr);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5HF__cache_hdr_get_load_size() */
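
A note on the dummy-header trick above: H5HF_HEADER_SIZE() needs a header object that carries the file's address and length widths, so a throw-away struct is filled in just far enough to evaluate the macro. The following standalone sketch shows the same pattern with invented toy_* names and a made-up layout; it is not the real fractal heap format, only an illustration of the idiom.

#include <stddef.h>

/* Toy header: only the field widths matter for the size computation. */
typedef struct toy_hdr {
    size_t sizeof_addr;     /* width of a file address in this file */
    size_t sizeof_size;     /* width of a length field in this file */
} toy_hdr_t;

/* Made-up on-disk layout: magic + version + one address + one length + checksum */
static size_t
toy_hdr_ondisk_size(const toy_hdr_t *h)
{
    return 4 + 1 + h->sizeof_addr + h->sizeof_size + 4;
}

/* get_load_size-style callback: populate a scratch header from the user data
 * and report the computed base size through *image_len. */
static int
toy_get_load_size(const toy_hdr_t *udata, size_t *image_len)
{
    toy_hdr_t dummy;        /* never stored; exists only to feed the size computation */

    dummy.sizeof_addr = udata->sizeof_addr;
    dummy.sizeof_size = udata->sizeof_size;
    *image_len = toy_hdr_ondisk_size(&dummy);

    return 0;
}

As in the real callback, any I/O filter information would make the true image larger than this base value; the speculative-load flag exists to cover that case.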
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF__cache_hdr_deserialize
+ *
+ * Purpose: Given a buffer containing an on disk image of a fractal heap
+ * header block, allocate an instance of H5HF_hdr_t, load the contents
+ *		of the buffer into the new instance of H5HF_hdr_t, and then
+ * return a pointer to the new instance.
+ *
+ *		Since H5HF__cache_hdr_get_load_size() reports the header's on disk
+ *		size based on the assumption that the header contains no I/O filtering
+ * data, it is possible that the provided image will be too small.
+ *
+ * In this case, we DO NOT flag an error when this is discovered.
+ * Instead, we make note of the correct image size, and report
+ * success.
+ *
+ *		Since the H5HF__cache_hdr_image_len() callback is defined,
+ * H5C_load_entry() will call H5HF__cache_hdr_image_len() and
+ * obtain the correct image length.
+ *
+ * Since the H5AC__CLASS_SPECULATIVE_LOAD_FLAG is set,
+ * H5C_load_entry() will load an image of the correct size, and
+ * then call this function again to deserialize it. Before doing
+ * so, it will also call H5HF__cache_hdr_free_icr() to discard the
+ * result of the first deserialize call.
+ *
+ * Note that the v2 B-tree and free space manager associated
+ * with the fractal heap (roots stored in the huge_bt2 and fspace
+ * fields respectively) are not loaded at this time. As best I can
+ * tell from reviewing the code, they are loaded or created when
+ * they are accessed.
+ *
+ * Return: Success: Pointer to in core representation
* Failure: NULL
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Feb 24 2006
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
-static H5HF_hdr_t *
-H5HF_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
+static void *
+H5HF__cache_hdr_deserialize(const void *_image, size_t len, void *_udata,
+ hbool_t H5_ATTR_UNUSED *dirty)
{
- H5HF_hdr_t *hdr = NULL; /* Fractal heap info */
- H5HF_hdr_cache_ud_t *udata = (H5HF_hdr_cache_ud_t *)_udata;
- size_t size; /* Header size */
- H5WB_t *wb = NULL; /* Wrapped buffer for header data */
- uint8_t hdr_buf[H5HF_HDR_BUF_SIZE]; /* Buffer for header */
- uint8_t *buf; /* Pointer to header buffer */
- const uint8_t *p; /* Pointer into raw data buffer */
+ H5HF_hdr_t *hdr = NULL; /* Fractal heap info */
+ H5HF_hdr_cache_ud_t *udata = (H5HF_hdr_cache_ud_t *)_udata; /* User data for callback */
+    const uint8_t *image = (const uint8_t *)_image;       /* Pointer into supplied image */
+ size_t size; /* Header size */
uint32_t stored_chksum; /* Stored metadata checksum value */
uint32_t computed_chksum; /* Computed metadata checksum value */
uint8_t heap_flags; /* Status flags for heap */
- H5HF_hdr_t *ret_value; /* Return value */
+ void * ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
- /* Check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ /* Sanity checks */
+ HDassert(image);
+ HDassert(len > 0);
HDassert(udata);
+ HDassert(dirty);
/* Allocate space for the fractal heap data structure */
if(NULL == (hdr = H5HF_hdr_alloc(udata->f)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
- /* Wrap the local buffer for serialized header info */
- if(NULL == (wb = H5WB_wrap(hdr_buf, sizeof(hdr_buf))))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, NULL, "can't wrap buffer")
-
/* Compute the 'base' size of the fractal heap header on disk */
size = (size_t)H5HF_HEADER_SIZE(hdr);
- /* Get a pointer to a buffer that's large enough for serialized header */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
- HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, NULL, "can't get actual buffer")
-
- /* Read header from disk */
- if(H5F_block_read(f, H5FD_MEM_FHEAP_HDR, addr, size, dxpl_id, buf) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_READERROR, NULL, "can't read fractal heap header")
-
- /* Get temporary pointer to serialized header */
- p = buf;
+ /* the size we have just calculated presumes that there is no I/O
+ * filter information in the header. If there is no filter information,
+ * the deserialize operation should succeed.
+ *
+ * If there is filter information, the first attempt to deserialize
+ * the header will reveal this. In this case, we will be unable to
+ * deserialize the header as the supplied image will be too small.
+ * However, we will make note of the correct size and report success
+ * anyway.
+ *
+ * When H5C_load_entry() calls H5HF__cache_hdr_image_len(), we will report
+ * the correct size. Since the H5C__CLASS_SPECULATIVE_LOAD_FLAG is set,
+ * this will prompt H5C_load_entry() to load the correct size image,
+ * discard the result of the first attempt at deserialization, and
+ * call this routine a second time to deserialize the correct size
+ * buffer.
+ */
+ HDassert(size <= len);
/* Magic number */
- if(HDmemcmp(p, H5HF_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "wrong fractal heap header signature")
- p += H5_SIZEOF_MAGIC;
+ if(HDmemcmp(image, H5HF_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "wrong fractal heap header signature")
+ image += H5_SIZEOF_MAGIC;
/* Version */
- if(*p++ != H5HF_HDR_VERSION)
+ if(*image++ != H5HF_HDR_VERSION)
HGOTO_ERROR(H5E_HEAP, H5E_VERSION, NULL, "wrong fractal heap header version")
/* General heap information */
- UINT16DECODE(p, hdr->id_len); /* Heap ID length */
- UINT16DECODE(p, hdr->filter_len); /* I/O filters' encoded length */
+ UINT16DECODE(image, hdr->id_len); /* Heap ID length */
+ UINT16DECODE(image, hdr->filter_len); /* I/O filters' encoded length */
/* Heap status flags */
/* (bit 0: "huge" object IDs have wrapped) */
/* (bit 1: checksum direct blocks) */
- heap_flags = *p++;
+ heap_flags = *image++;
hdr->huge_ids_wrapped = heap_flags & H5HF_HDR_FLAGS_HUGE_ID_WRAPPED;
hdr->checksum_dblocks = heap_flags & H5HF_HDR_FLAGS_CHECKSUM_DBLOCKS;
/* "Huge" object information */
- UINT32DECODE(p, hdr->max_man_size); /* Max. size of "managed" objects */
- H5F_DECODE_LENGTH(udata->f, p, hdr->huge_next_id); /* Next ID to use for "huge" object */
- H5F_addr_decode(udata->f, &p, &hdr->huge_bt2_addr); /* Address of "huge" object tracker B-tree */
+ UINT32DECODE(image, hdr->max_man_size); /* Max. size of "managed" objects */
+ H5F_DECODE_LENGTH(udata->f, image, hdr->huge_next_id); /* Next ID to use for "huge" object */
+ H5F_addr_decode(udata->f, &image, &hdr->huge_bt2_addr); /* Address of "huge" object tracker B-tree */
/* "Managed" object free space information */
- H5F_DECODE_LENGTH(udata->f, p, hdr->total_man_free); /* Internal free space in managed direct blocks */
- H5F_addr_decode(udata->f, &p, &hdr->fs_addr); /* Address of free section header */
+ H5F_DECODE_LENGTH(udata->f, image, hdr->total_man_free); /* Internal free space in managed direct blocks */
+ H5F_addr_decode(udata->f, &image, &hdr->fs_addr); /* Address of free section header */
/* Heap statistics */
- H5F_DECODE_LENGTH(udata->f, p, hdr->man_size);
- H5F_DECODE_LENGTH(udata->f, p, hdr->man_alloc_size);
- H5F_DECODE_LENGTH(udata->f, p, hdr->man_iter_off);
- H5F_DECODE_LENGTH(udata->f, p, hdr->man_nobjs);
- H5F_DECODE_LENGTH(udata->f, p, hdr->huge_size);
- H5F_DECODE_LENGTH(udata->f, p, hdr->huge_nobjs);
- H5F_DECODE_LENGTH(udata->f, p, hdr->tiny_size);
- H5F_DECODE_LENGTH(udata->f, p, hdr->tiny_nobjs);
+ H5F_DECODE_LENGTH(udata->f, image, hdr->man_size);
+ H5F_DECODE_LENGTH(udata->f, image, hdr->man_alloc_size);
+ H5F_DECODE_LENGTH(udata->f, image, hdr->man_iter_off);
+ H5F_DECODE_LENGTH(udata->f, image, hdr->man_nobjs);
+ H5F_DECODE_LENGTH(udata->f, image, hdr->huge_size);
+ H5F_DECODE_LENGTH(udata->f, image, hdr->huge_nobjs);
+ H5F_DECODE_LENGTH(udata->f, image, hdr->tiny_size);
+ H5F_DECODE_LENGTH(udata->f, image, hdr->tiny_nobjs);
/* Managed objects' doubling-table info */
- if(H5HF__dtable_decode(hdr->f, &p, &(hdr->man_dtable)) < 0)
+ if(H5HF__dtable_decode(hdr->f, &image, &(hdr->man_dtable)) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTENCODE, NULL, "unable to encode managed obj. doubling table info")
/* Sanity check */
/* (allow for checksum not decoded yet) */
- HDassert((size_t)(p - (const uint8_t *)buf) == (size - H5HF_SIZEOF_CHKSUM));
+ HDassert((size_t)(image - (const uint8_t *)_image) == (size - H5HF_SIZEOF_CHKSUM));
/* Check for I/O filter information to decode */
if(hdr->filter_len > 0) {
- size_t filter_info_off; /* Offset in header of filter information */
size_t filter_info_size; /* Size of filter information */
H5O_pline_t *pline; /* Pipeline information from the header on disk */
- /* Compute the offset of the filter info in the header */
- filter_info_off = (size_t)(p - (const uint8_t *)buf);
-
/* Compute the size of the extra filter information */
filter_info_size = (size_t)(hdr->sizeof_size /* Size of size for filtered root direct block */
+ (unsigned)4 /* Size of filter mask for filtered root direct block */
@@ -389,28 +497,27 @@ H5HF_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Compute the heap header's size */
hdr->heap_size = size + filter_info_size;
- /* Re-size current buffer */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, hdr->heap_size)))
- HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, NULL, "can't get actual buffer")
-
- /* Read in I/O filter information */
- /* (and the checksum) */
- if(H5F_block_read(f, H5FD_MEM_FHEAP_HDR, (addr + filter_info_off), (filter_info_size + H5HF_SIZEOF_CHKSUM), dxpl_id, (buf + filter_info_off)) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_READERROR, NULL, "can't read fractal heap header's I/O pipeline filter info")
+ if(size == len)
+ /* we were supplied with too small a buffer -- goto done
+ * and let H5C_load_entry() retry with a larger buffer
+ */
+ HGOTO_DONE((void *)hdr)
- /* Point at correct offset in header for the filter information */
- p = buf + filter_info_off;
+ else
+ if((size + filter_info_size) != len)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "bad image len")
/* Decode the size of a filtered root direct block */
- H5F_DECODE_LENGTH(udata->f, p, hdr->pline_root_direct_size);
+ H5F_DECODE_LENGTH(udata->f, image, hdr->pline_root_direct_size);
/* Decode the filter mask for a filtered root direct block */
- UINT32DECODE(p, hdr->pline_root_direct_filter_mask);
+ UINT32DECODE(image, hdr->pline_root_direct_filter_mask);
/* Decode I/O filter information */
- if(NULL == (pline = (H5O_pline_t *)H5O_msg_decode(hdr->f, udata->dxpl_id, NULL, H5O_PLINE_ID, p)))
+ if(NULL == (pline = (H5O_pline_t *)H5O_msg_decode(hdr->f, udata->dxpl_id, NULL, H5O_PLINE_ID, image)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTDECODE, NULL, "can't decode I/O pipeline filters")
- p += hdr->filter_len;
+
+ image += hdr->filter_len;
/* Copy the information into the header's I/O pipeline structure */
if(NULL == H5O_msg_copy(H5O_PLINE_ID, pline, &(hdr->pline)))
@@ -425,13 +532,13 @@ H5HF_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Compute checksum on entire header */
/* (including the filter information, if present) */
- computed_chksum = H5_checksum_metadata(buf, (size_t)(p - (const uint8_t *)buf), 0);
+ computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
/* Metadata checksum */
- UINT32DECODE(p, stored_chksum);
+ UINT32DECODE(image, stored_chksum);
/* Sanity check */
- HDassert((size_t)(p - (const uint8_t *)buf) == hdr->heap_size);
+ HDassert((size_t)(image - (const uint8_t *)_image) == hdr->heap_size);
/* Verify checksum */
if(stored_chksum != computed_chksum)
@@ -442,291 +549,324 @@ H5HF_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, NULL, "can't finish initializing shared fractal heap header")
/* Set return value */
- ret_value = hdr;
+ ret_value = (void *)hdr;
done:
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- HDONE_ERROR(H5E_HEAP, H5E_CLOSEERROR, NULL, "can't close wrapped buffer")
if(!ret_value && hdr)
if(H5HF_hdr_free(hdr) < 0)
HDONE_ERROR(H5E_HEAP, H5E_CANTRELEASE, NULL, "unable to release fractal heap header")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5HF_cache_hdr_load() */ /*lint !e818 Can't make udata a pointer to const */
+} /* end H5HF__cache_hdr_deserialize() */
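
The comment blocks above describe a three-step exchange between the cache and the client: deserialize a guessed-size image, report the true size via the image_len callback, and then (because the class carries the speculative-load flag) re-read and deserialize once more. The following is a minimal standalone sketch of that control flow, assuming invented toy_* callback types; it is not the real H5C_load_entry() logic, only the shape of the retry.

#include <stdio.h>
#include <stdlib.h>

/* Invented callback signatures, loosely modeled on the ones in this patch. */
typedef size_t (*toy_get_load_size_t)(const void *udata);
typedef void  *(*toy_deserialize_t)(const void *image, size_t len, const void *udata);
typedef size_t (*toy_image_len_t)(const void *thing);
typedef void   (*toy_free_icr_t)(void *thing);

/* One speculative load: read a guessed-size image, deserialize it, then ask the
 * in-core object how big it really is.  If the guess was too small, discard the
 * first result, re-read the full image, and deserialize again. */
static void *
toy_load_entry(FILE *fp, long addr, const void *udata,
    toy_get_load_size_t get_load_size, toy_deserialize_t deserialize,
    toy_image_len_t image_len, toy_free_icr_t free_icr)
{
    size_t         len   = get_load_size(udata);   /* first guess at the image size */
    unsigned char *image = malloc(len);
    void          *thing = NULL;
    size_t         actual;

    if(!image || fseek(fp, addr, SEEK_SET) != 0 || fread(image, 1, len, fp) != len)
        goto error;
    if(NULL == (thing = deserialize(image, len, udata)))
        goto error;

    /* The (possibly under-read) in-core object reports its true image size */
    actual = image_len(thing);
    if(actual > len) {
        unsigned char *bigger = realloc(image, actual);

        if(!bigger)
            goto error;
        image = bigger;

        /* Discard the first attempt, re-read the full image, deserialize again */
        free_icr(thing);
        thing = NULL;
        if(fseek(fp, addr, SEEK_SET) != 0 || fread(image, 1, actual, fp) != actual)
            goto error;
        if(NULL == (thing = deserialize(image, actual, udata)))
            goto error;
    } /* end if */

    free(image);
    return thing;

error:
    free(image);
    if(thing)
        free_icr(thing);
    return NULL;
}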
/*-------------------------------------------------------------------------
- * Function: H5HF_cache_hdr_flush
+ * Function: H5HF__cache_hdr_image_len
*
- * Purpose: Flushes a dirty fractal heap header to disk.
+ * Purpose:	Return the actual size of the fractal heap header's on
+ *		disk image.
*
- * Return: Non-negative on success/Negative on failure
+ * If the header contains filter information, this size will be
+ * larger than the value returned by H5HF__cache_hdr_get_load_size().
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Feb 24 2006
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5HF_cache_hdr_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5HF_hdr_t *hdr, unsigned H5_ATTR_UNUSED * flags_ptr)
+static herr_t
+H5HF__cache_hdr_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- H5WB_t *wb = NULL; /* Wrapped buffer for header data */
- uint8_t hdr_buf[H5HF_HDR_BUF_SIZE]; /* Buffer for header */
- herr_t ret_value = SUCCEED; /* Return value */
+ const H5HF_hdr_t *hdr = (const H5HF_hdr_t *)_thing; /* Fractal heap info */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC_NOERR
- /* check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ /* Sanity checks */
HDassert(hdr);
+ HDassert(hdr->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(hdr->cache_info.type == H5AC_FHEAP_HDR);
+ HDassert(image_len);
- if(hdr->cache_info.is_dirty) {
- uint8_t *buf; /* Temporary raw data buffer */
- uint8_t *p; /* Pointer into raw data buffer */
- size_t size; /* Header size on disk */
- uint8_t heap_flags; /* Status flags for heap */
- uint32_t metadata_chksum; /* Computed metadata checksum value */
-
-#ifndef NDEBUG
-{
- /* Verify that flush dependencies are working correctly. Do this
- * by verifying that either:
- *
- * 1) the header has a root iblock, and that the root iblock and all
- * of its children are clean, or
- *
- * 2) The header has a root dblock, which is clean, or
- *
- * 3) The heap is empty, and thus the header has neither a root
- * iblock no a root dblock. In this case, the flush ordering
- * constraint is met by default.
- *
- * Do this with a call to H5HF__cache_verify_hdr_descendants_clean().
- */
- hbool_t descendants_clean = TRUE;
+ *image_len = hdr->heap_size;
- if(H5HF__cache_verify_hdr_descendants_clean(f, dxpl_id, hdr, &descendants_clean) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify hdr descendants clean.")
- HDassert(descendants_clean);
-}
-#endif /* NDEBUG */
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5HF__cache_hdr_image_len() */
- /* Set the shared heap header's file context for this operation */
- hdr->f = f;
-
- /* Wrap the local buffer for serialized header info */
- if(NULL == (wb = H5WB_wrap(hdr_buf, sizeof(hdr_buf))))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, FAIL, "can't wrap buffer")
-
- /* Compute the size of the heap header on disk */
- size = hdr->heap_size;
-
- /* Get a pointer to a buffer that's large enough for serialized header */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
- HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "can't get actual buffer")
-
- /* Get temporary pointer to serialized header */
- p = buf;
-
- /* Magic number */
- HDmemcpy(p, H5HF_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC);
- p += H5_SIZEOF_MAGIC;
-
- /* Version # */
- *p++ = H5HF_HDR_VERSION;
-
- /* General heap information */
- UINT16ENCODE(p, hdr->id_len); /* Heap ID length */
- UINT16ENCODE(p, hdr->filter_len); /* I/O filters' encoded length */
-
- /* Heap status flags */
- /* (bit 0: "huge" object IDs have wrapped) */
- /* (bit 1: checksum direct blocks) */
- heap_flags = 0;
- heap_flags = (uint8_t)(heap_flags | (hdr->huge_ids_wrapped ? H5HF_HDR_FLAGS_HUGE_ID_WRAPPED : 0));
- heap_flags = (uint8_t)(heap_flags | (hdr->checksum_dblocks ? H5HF_HDR_FLAGS_CHECKSUM_DBLOCKS : 0));
- *p++ = heap_flags;
-
- /* "Huge" object information */
- UINT32ENCODE(p, hdr->max_man_size); /* Max. size of "managed" objects */
- H5F_ENCODE_LENGTH(f, p, hdr->huge_next_id); /* Next ID to use for "huge" object */
- H5F_addr_encode(f, &p, hdr->huge_bt2_addr); /* Address of "huge" object tracker B-tree */
-
- /* "Managed" object free space information */
- H5F_ENCODE_LENGTH(f, p, hdr->total_man_free); /* Internal free space in managed direct blocks */
- H5F_addr_encode(f, &p, hdr->fs_addr); /* Address of free section header */
-
- /* Heap statistics */
- H5F_ENCODE_LENGTH(f, p, hdr->man_size);
- H5F_ENCODE_LENGTH(f, p, hdr->man_alloc_size);
- H5F_ENCODE_LENGTH(f, p, hdr->man_iter_off);
- H5F_ENCODE_LENGTH(f, p, hdr->man_nobjs);
- H5F_ENCODE_LENGTH(f, p, hdr->huge_size);
- H5F_ENCODE_LENGTH(f, p, hdr->huge_nobjs);
- H5F_ENCODE_LENGTH(f, p, hdr->tiny_size);
- H5F_ENCODE_LENGTH(f, p, hdr->tiny_nobjs);
-
- /* Managed objects' doubling-table info */
- if(H5HF__dtable_encode(hdr->f, &p, &(hdr->man_dtable)) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTENCODE, FAIL, "unable to encode managed obj. doubling table info")
-
- /* Check for I/O filter information to encode */
- if(hdr->filter_len > 0) {
- /* Encode the size of a filtered root direct block */
- H5F_ENCODE_LENGTH(f, p, hdr->pline_root_direct_size);
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF__cache_hdr_pre_serialize
+ *
+ * Purpose: As best I can tell, fractal heap header blocks are always
+ * allocated in real file space. Thus this routine simply verifies
+ * this, verifies that the len parameter contains the expected
+ * value, and returns an error if either of these checks fail.
+ *
+ * When compiled in debug mode, the function also verifies that all
+ * indirect and direct blocks that are children of the header are
+ * either clean, or not in the metadata cache.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 6/21/14
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF__cache_hdr_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *_thing,
+ haddr_t addr, size_t len, size_t H5_ATTR_UNUSED compressed_len,
+ haddr_t H5_ATTR_UNUSED *new_addr, size_t H5_ATTR_UNUSED *new_len,
+ size_t H5_ATTR_UNUSED *new_compressed_len, unsigned *flags)
+{
+ H5HF_hdr_t *hdr = (H5HF_hdr_t *)_thing; /* Fractal heap info */
+ herr_t ret_value = SUCCEED; /* Return value */
- /* Encode the filter mask for a filtered root direct block */
- UINT32ENCODE(p, hdr->pline_root_direct_filter_mask);
+ FUNC_ENTER_STATIC
- /* Encode I/O filter information */
- if(H5O_msg_encode(hdr->f, H5O_PLINE_ID, FALSE, p, &(hdr->pline)) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTENCODE, FAIL, "can't encode I/O pipeline fiters")
- p += hdr->filter_len;
- } /* end if */
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(hdr);
+ HDassert(hdr->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(hdr->cache_info.type == H5AC_FHEAP_HDR);
+ HDassert(H5F_addr_defined(addr));
+ HDassert(addr == hdr->heap_addr);
+ HDassert(new_addr);
+ HDassert(new_len);
+ HDassert(flags);
- /* Compute metadata checksum */
- metadata_chksum = H5_checksum_metadata(buf, (size_t)(p - buf), 0);
+#ifndef NDEBUG
+{
+ hbool_t descendants_clean = TRUE;
- /* Metadata checksum */
- UINT32ENCODE(p, metadata_chksum);
+ /* Verify that flush dependencies are working correctly. Do this
+ * by verifying that either:
+ *
+ * 1) the header has a root iblock, and that the root iblock and all
+ * of its children are clean, or
+ *
+ * 2) The header has a root dblock, which is clean, or
+ *
+ * 3) The heap is empty, and thus the header has neither a root
+     *    iblock nor a root dblock.  In this case, the flush ordering
+ * constraint is met by default.
+ *
+ * Do this with a call to H5HF__cache_verify_hdr_descendants_clean().
+ */
+ if(H5HF__cache_verify_hdr_descendants_clean((H5F_t *)f, dxpl_id, hdr, &descendants_clean) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify hdr descendants clean.")
+ HDassert(descendants_clean);
+}
+#endif /* NDEBUG */
- /* Write the heap header. */
- HDassert((size_t)(p - buf) == size);
- if(H5F_block_write(f, H5FD_MEM_FHEAP_HDR, addr, size, dxpl_id, buf) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFLUSH, FAIL, "unable to save fractal heap header to disk")
+ if(H5F_IS_TMP_ADDR(f, addr))
+ HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "addr in temporary space?!?.");
- hdr->cache_info.is_dirty = FALSE;
- } /* end if */
+ if(len != hdr->heap_size)
+ HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "unexpected image len.");
- if(destroy)
- if(H5HF_cache_hdr_dest(f, hdr) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to destroy fractal heap header")
+ *flags = 0;
done:
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- HDONE_ERROR(H5E_HEAP, H5E_CLOSEERROR, FAIL, "can't close wrapped buffer")
-
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5HF_cache_hdr_flush() */
+} /* end H5HF__cache_hdr_pre_serialize() */
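
Read together with the indirect block version further down, the pre_serialize contract amounts to: sanity-check the address and length the cache hands in, relocate the entry if it still lives in temporary file space, and report any move back through *new_addr and the flags word (H5C__SERIALIZE_MOVED_FLAG in the real code). Below is a standalone sketch of that contract under the assumption of invented toy_* names, with a trivial stand-in allocator where the real code calls H5MF_alloc().

#include <stddef.h>

#define TOY_SERIALIZE_NO_FLAGS   0u
#define TOY_SERIALIZE_MOVED      0x1u  /* stand-in for H5C__SERIALIZE_MOVED_FLAG */

/* Invented entry type: all it needs to know is where it currently lives. */
typedef struct toy_entry {
    long   addr;        /* current (possibly temporary) file address */
    size_t size;        /* size of the on-disk image                 */
    int    is_temp;     /* nonzero if addr is in "temporary" space   */
} toy_entry_t;

/* Trivial stand-in allocator: hands out increasing "file addresses". */
static long toy_next_addr = 4096;

static long
toy_alloc_file_space(size_t size)
{
    long addr = toy_next_addr;

    toy_next_addr += (long)size;
    return addr;
}

/* pre_serialize-style callback: confirm the cached address/length, move the
 * entry into real file space if needed, and report the move to the cache. */
static int
toy_pre_serialize(toy_entry_t *entry, long addr, size_t len,
    long *new_addr, unsigned *flags)
{
    if(addr != entry->addr || len != entry->size)
        return -1;                      /* cache and client disagree */

    if(entry->is_temp) {
        entry->addr    = toy_alloc_file_space(entry->size);
        entry->is_temp = 0;
        *new_addr      = entry->addr;   /* tell the cache about the move */
        *flags         = TOY_SERIALIZE_MOVED;
    } /* end if */
    else
        *flags = TOY_SERIALIZE_NO_FLAGS;

    return 0;
}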
/*-------------------------------------------------------------------------
- * Function: H5HF_cache_hdr_dest
+ * Function: H5HF__cache_hdr_serialize
*
- * Purpose: Destroys a fractal heap header in memory.
+ * Purpose: Construct the on disk image of the header, and place it in
+ * the buffer pointed to by image. Return SUCCEED on success,
+ * and FAIL on failure.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Feb 24 2006
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5HF_cache_hdr_dest(H5F_t *f, H5HF_hdr_t *hdr)
+static herr_t
+H5HF__cache_hdr_serialize(const H5F_t *f, void *_image, size_t len,
+ void *_thing)
{
+ H5HF_hdr_t *hdr = (H5HF_hdr_t *)_thing; /* Fractal heap info */
+ uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */
+ uint8_t heap_flags; /* Status flags for heap */
+ uint32_t metadata_chksum; /* Computed metadata checksum value */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
- /*
- * Check arguments.
- */
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(image);
HDassert(hdr);
- HDassert(hdr->rc == 0);
+ HDassert(hdr->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(hdr->cache_info.type == H5AC_FHEAP_HDR);
+ HDassert(len == hdr->heap_size);
- /* If we're going to free the space on disk, the address must be valid */
- HDassert(!hdr->cache_info.free_file_space_on_destroy || H5F_addr_defined(hdr->cache_info.addr));
+ /* Set the shared heap header's file context for this operation */
+ hdr->f = f;
+
+ /* Magic number */
+ HDmemcpy(image, H5HF_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC);
+ image += H5_SIZEOF_MAGIC;
- /* Check for freeing file space for heap header */
- if(hdr->cache_info.free_file_space_on_destroy) {
- /* Release the space on disk */
- /* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_FHEAP_HDR, H5AC_dxpl_id, hdr->cache_info.addr, (hsize_t)hdr->heap_size) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to free fractal heap header")
+ /* Version # */
+ *image++ = H5HF_HDR_VERSION;
+
+ /* General heap information */
+ UINT16ENCODE(image, hdr->id_len); /* Heap ID length */
+ UINT16ENCODE(image, hdr->filter_len); /* I/O filters' encoded length */
+
+ /* Heap status flags */
+ /* (bit 0: "huge" object IDs have wrapped) */
+ /* (bit 1: checksum direct blocks) */
+ heap_flags = 0;
+ heap_flags = (uint8_t)(heap_flags | (hdr->huge_ids_wrapped ? H5HF_HDR_FLAGS_HUGE_ID_WRAPPED : 0));
+ heap_flags = (uint8_t)(heap_flags | (hdr->checksum_dblocks ? H5HF_HDR_FLAGS_CHECKSUM_DBLOCKS : 0));
+ *image++ = heap_flags;
+
+ /* "Huge" object information */
+ UINT32ENCODE(image, hdr->max_man_size); /* Max. size of "managed" objects */
+ H5F_ENCODE_LENGTH(f, image, hdr->huge_next_id); /* Next ID to use for "huge" object */
+ H5F_addr_encode(f, &image, hdr->huge_bt2_addr); /* Address of "huge" object tracker B-tree */
+
+ /* "Managed" object free space information */
+ H5F_ENCODE_LENGTH(f, image, hdr->total_man_free); /* Internal free space in managed direct blocks */
+ H5F_addr_encode(f, &image, hdr->fs_addr); /* Address of free section header */
+
+ /* Heap statistics */
+ H5F_ENCODE_LENGTH(f, image, hdr->man_size);
+ H5F_ENCODE_LENGTH(f, image, hdr->man_alloc_size);
+ H5F_ENCODE_LENGTH(f, image, hdr->man_iter_off);
+ H5F_ENCODE_LENGTH(f, image, hdr->man_nobjs);
+ H5F_ENCODE_LENGTH(f, image, hdr->huge_size);
+ H5F_ENCODE_LENGTH(f, image, hdr->huge_nobjs);
+ H5F_ENCODE_LENGTH(f, image, hdr->tiny_size);
+ H5F_ENCODE_LENGTH(f, image, hdr->tiny_nobjs);
+
+ /* Managed objects' doubling-table info */
+ if(H5HF__dtable_encode(hdr->f, &image, &(hdr->man_dtable)) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTENCODE, FAIL, "unable to encode managed obj. doubling table info")
+
+ /* Check for I/O filter information to encode */
+ if(hdr->filter_len > 0) {
+ /* Encode the size of a filtered root direct block */
+ H5F_ENCODE_LENGTH(f, image, hdr->pline_root_direct_size);
+
+ /* Encode the filter mask for a filtered root direct block */
+ UINT32ENCODE(image, hdr->pline_root_direct_filter_mask);
+
+ /* Encode I/O filter information */
+ if(H5O_msg_encode(hdr->f, H5O_PLINE_ID, FALSE, image, &(hdr->pline)) < 0)
+            HGOTO_ERROR(H5E_HEAP, H5E_CANTENCODE, FAIL, "can't encode I/O pipeline filters")
+ image += hdr->filter_len;
} /* end if */
- /* Free the shared info itself */
- if(H5HF_hdr_free(hdr) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTRELEASE, FAIL, "unable to release fractal heap header")
+ /* Compute metadata checksum */
+ metadata_chksum = H5_checksum_metadata(_image, (size_t)(image - (uint8_t *)_image), 0);
+
+ /* Metadata checksum */
+ UINT32ENCODE(image, metadata_chksum);
+
+ /* sanity check */
+ HDassert((size_t)(image - (uint8_t *)_image) == len);
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5HF_cache_hdr_dest() */
+} /* end H5HF__cache_hdr_serialize() */
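
The serialize callback above writes every field into the cache-supplied buffer and finishes with a 4-byte checksum over all bytes that precede it; the deserialize side recomputes that checksum and compares it to the stored value. The standalone sketch below shows the encode-then-checksum layout with a deliberately simple stand-in hash (the real code uses H5_checksum_metadata()) and a made-up two-field record; the toy_* names and the byte layout are assumptions for illustration only.

#include <stdint.h>
#include <string.h>
#include <stddef.h>

/* Stand-in checksum: NOT the hash H5_checksum_metadata() actually uses; it
 * only illustrates where the checksum goes and what it covers. */
static uint32_t
toy_checksum(const uint8_t *buf, size_t len)
{
    uint32_t sum = 0;
    size_t   i;

    for(i = 0; i < len; i++)
        sum = (sum << 1) ^ buf[i];
    return sum;
}

/* Encode a small magic + two fields, then append the checksum of every byte
 * written so far, mirroring the "checksum last" layout of the heap header. */
static size_t
toy_serialize(uint8_t *image, uint16_t id_len, uint32_t max_size)
{
    uint8_t *p = image;
    uint32_t chksum;

    memcpy(p, "TOYH", 4);            p += 4;          /* signature         */
    *p++ = 0;                                         /* version           */
    *p++ = (uint8_t)(id_len & 0xff);                  /* little-endian u16 */
    *p++ = (uint8_t)(id_len >> 8);
    *p++ = (uint8_t)(max_size & 0xff);                /* little-endian u32 */
    *p++ = (uint8_t)((max_size >> 8) & 0xff);
    *p++ = (uint8_t)((max_size >> 16) & 0xff);
    *p++ = (uint8_t)((max_size >> 24) & 0xff);

    chksum = toy_checksum(image, (size_t)(p - image));
    *p++ = (uint8_t)(chksum & 0xff);                  /* checksum trailer  */
    *p++ = (uint8_t)((chksum >> 8) & 0xff);
    *p++ = (uint8_t)((chksum >> 16) & 0xff);
    *p++ = (uint8_t)((chksum >> 24) & 0xff);

    return (size_t)(p - image);                       /* bytes written     */
}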
+
+/***************************************/
+/* no H5HF__cache_hdr_notify() function */
+/***************************************/
/*-------------------------------------------------------------------------
- * Function: H5HF_cache_hdr_clear
+ * Function: H5HF__cache_hdr_free_icr
*
- * Purpose: Mark a fractal heap header in memory as non-dirty.
+ * Purpose: Free the in core representation of the fractal heap header.
*
- * Return: Non-negative on success/Negative on failure
+ * This routine frees just the header itself, not the
+ * associated version 2 B-Tree, the associated Free Space Manager,
+ * nor the indirect/direct block tree that is rooted in the header.
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Feb 24 2006
+ * This routine also does not free the file space that may
+ * be allocated to the header.
+ *
+ * Note: The metadata cache sets the object's cache_info.magic to
+ * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr
+ * callback (checked in assert).
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5HF_cache_hdr_clear(H5F_t *f, H5HF_hdr_t *hdr, hbool_t destroy)
+static herr_t
+H5HF__cache_hdr_free_icr(void *_thing)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ H5HF_hdr_t *hdr = (H5HF_hdr_t *)_thing; /* Fractal heap info */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
- /*
- * Check arguments.
- */
+ /* Sanity checks */
HDassert(hdr);
+ HDassert(hdr->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC);
+ HDassert(hdr->cache_info.type == H5AC_FHEAP_HDR);
+ HDassert(hdr->rc == 0);
- /* Reset the dirty flag. */
- hdr->cache_info.is_dirty = FALSE;
-
- if(destroy)
- if(H5HF_cache_hdr_dest(f, hdr) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to destroy fractal heap header")
+ if(H5HF_hdr_free(hdr) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTRELEASE, FAIL, "unable to release fractal heap header")
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5HF_cache_hdr_clear() */
+} /* end H5HF__cache_hdr_free_icr() */
+
+/***********************************************************/
+/* metadata cache callback definitions for indirect blocks */
+/***********************************************************/
/*-------------------------------------------------------------------------
- * Function: H5HF_cache_hdr_size
+ * Function: H5HF__cache_iblock_get_load_size()
*
- * Purpose: Compute the size in bytes of a fractal heap header
- * on disk, and return it in *size_ptr. On failure,
- * the value of *size_ptr is undefined.
+ * Purpose: Compute the size of the on disk image of the indirect
+ * block, and place this value in *image_len.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Feb 24 2006
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5HF_cache_hdr_size(const H5F_t H5_ATTR_UNUSED *f, const H5HF_hdr_t *hdr, size_t *size_ptr)
+static herr_t
+H5HF__cache_iblock_get_load_size(const void *_udata, size_t *image_len)
{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ const H5HF_iblock_cache_ud_t *udata = (const H5HF_iblock_cache_ud_t *)_udata; /* User data for callback */
- /* Check arguments */
- HDassert(f);
- HDassert(hdr);
- HDassert(size_ptr);
+ FUNC_ENTER_STATIC_NOERR
- /* Set size value */
- *size_ptr = hdr->heap_size;
+ /* Sanity checks */
+ HDassert(udata);
+ HDassert(image_len);
+
+ *image_len = (size_t)H5HF_MAN_INDIRECT_SIZE(udata->par_info->hdr, *udata->nrows);
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5HF_cache_hdr_size() */
+} /* end H5HF__cache_iblock_get_load_size() */
/***********************************************************/
/* metadata cache callback definitions for indirect blocks */
@@ -734,53 +874,55 @@ H5HF_cache_hdr_size(const H5F_t H5_ATTR_UNUSED *f, const H5HF_hdr_t *hdr, size_t
/*-------------------------------------------------------------------------
- * Function: H5HF_cache_iblock_load
+ * Function: H5HF__cache_iblock_deserialize
*
- * Purpose: Loads a fractal heap indirect block from the disk.
+ * Purpose: Given a buffer containing the on disk image of the indirect
+ * block, allocate an instance of H5HF_indirect_t, load the data
+ * in the buffer into this new instance, and return a pointer to
+ * it.
*
- * Return: Success: Pointer to a new fractal heap indirect block
+ * As best I can tell, the size of the indirect block image is fully
+ *		known before the image is loaded, so this function should succeed
+ * unless the image is corrupt or memory allocation fails.
*
+ * Return: Success: Pointer to in core representation
* Failure: NULL
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Feb 27 2006
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
-static H5HF_indirect_t *
-H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
+static void *
+H5HF__cache_iblock_deserialize(const void *_image, size_t len, void *_udata,
+ hbool_t H5_ATTR_UNUSED *dirty)
{
H5HF_hdr_t *hdr; /* Shared fractal heap information */
- H5HF_iblock_cache_ud_t *udata = (H5HF_iblock_cache_ud_t *)_udata; /* user data for callback */
- H5HF_indirect_t *iblock = NULL; /* Indirect block info */
- H5WB_t *wb = NULL; /* Wrapped buffer for indirect block data */
- uint8_t iblock_buf[H5HF_IBLOCK_BUF_SIZE]; /* Buffer for indirect block */
- uint8_t *buf; /* Temporary buffer */
- const uint8_t *p; /* Pointer into raw data buffer */
+ H5HF_iblock_cache_ud_t *udata = (H5HF_iblock_cache_ud_t *)_udata; /* User data for callback */
+ H5HF_indirect_t *iblock = NULL; /* Indirect block info */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
haddr_t heap_addr; /* Address of heap header in the file */
uint32_t stored_chksum; /* Stored metadata checksum value */
uint32_t computed_chksum; /* Computed metadata checksum value */
unsigned u; /* Local index variable */
- H5HF_indirect_t *ret_value; /* Return value */
+ void * ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
- /* Check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ /* Sanity checks */
+ HDassert(image);
HDassert(udata);
-
- /* Allocate space for the fractal heap indirect block */
- if(NULL == (iblock = H5FL_CALLOC(H5HF_indirect_t)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
-
- /* Get the pointer to the shared heap header */
+ HDassert(dirty);
hdr = udata->par_info->hdr;
+ HDassert(hdr->f);
/* Set the shared heap header's file context for this operation */
hdr->f = udata->f;
+ /* Allocate space for the fractal heap indirect block */
+ if(NULL == (iblock = H5FL_CALLOC(H5HF_indirect_t)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
+
/* Share common heap information */
iblock->hdr = hdr;
if(H5HF_hdr_incr(hdr) < 0)
@@ -791,35 +933,23 @@ H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
iblock->nrows = *udata->nrows;
iblock->nchildren = 0;
- /* Wrap the local buffer for serialized indirect block */
- if(NULL == (wb = H5WB_wrap(iblock_buf, sizeof(iblock_buf))))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, NULL, "can't wrap buffer")
-
/* Compute size of indirect block */
iblock->size = H5HF_MAN_INDIRECT_SIZE(hdr, iblock->nrows);
- /* Get a pointer to a buffer that's large enough for serialized indirect block */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, iblock->size)))
- HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, NULL, "can't get actual buffer")
-
- /* Read indirect block from disk */
- if(H5F_block_read(f, H5FD_MEM_FHEAP_IBLOCK, addr, iblock->size, dxpl_id, buf) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_READERROR, NULL, "can't read fractal heap indirect block")
-
- /* Get temporary pointer to serialized indirect block */
- p = buf;
+ /* sanity check */
+ HDassert(iblock->size == len);
/* Magic number */
- if(HDmemcmp(p, H5HF_IBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "wrong fractal heap indirect block signature")
- p += H5_SIZEOF_MAGIC;
+ if(HDmemcmp(image, H5HF_IBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "wrong fractal heap indirect block signature")
+ image += H5_SIZEOF_MAGIC;
/* Version */
- if(*p++ != H5HF_IBLOCK_VERSION)
+ if(*image++ != H5HF_IBLOCK_VERSION)
HGOTO_ERROR(H5E_HEAP, H5E_VERSION, NULL, "wrong fractal heap direct block version")
/* Address of heap that owns this block */
- H5F_addr_decode(udata->f, &p, &heap_addr);
+ H5F_addr_decode(udata->f, &image, &heap_addr);
if(H5F_addr_ne(heap_addr, hdr->heap_addr))
HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "incorrect heap header address for direct block")
@@ -844,7 +974,7 @@ H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
} /* end else */
/* Offset of heap within the heap's address space */
- UINT64DECODE_VAR(p, iblock->block_off, hdr->heap_off_size);
+ UINT64DECODE_VAR(image, iblock->block_off, hdr->heap_off_size);
/* Allocate & decode child block entry tables */
HDassert(iblock->nrows > 0);
@@ -866,7 +996,7 @@ H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
for(u = 0; u < (iblock->nrows * hdr->man_dtable.cparam.width); u++) {
/* Decode child block address */
- H5F_addr_decode(udata->f, &p, &(iblock->ents[u].addr));
+ H5F_addr_decode(udata->f, &image, &(iblock->ents[u].addr));
/* Check for heap with I/O filters */
if(hdr->filter_len > 0) {
@@ -876,7 +1006,7 @@ H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Decode extra information for direct blocks */
if(u < (hdr->man_dtable.max_direct_rows * hdr->man_dtable.cparam.width)) {
/* Size of filtered direct block */
- H5F_DECODE_LENGTH(udata->f, p, iblock->filt_ents[u].size);
+ H5F_DECODE_LENGTH(udata->f, image, iblock->filt_ents[u].size);
/* Sanity check */
/* (either both the address & size are defined or both are
@@ -886,7 +1016,7 @@ H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
|| (!H5F_addr_defined(iblock->ents[u].addr) && iblock->filt_ents[u].size == 0));
/* I/O filter mask for filtered direct block */
- UINT32DECODE(p, iblock->filt_ents[u].filter_mask);
+ UINT32DECODE(image, iblock->filt_ents[u].filter_mask);
} /* end if */
} /* end if */
@@ -901,13 +1031,13 @@ H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
HDassert(iblock->nchildren); /* indirect blocks w/no children should have been deleted */
/* Compute checksum on indirect block */
- computed_chksum = H5_checksum_metadata(buf, (size_t)(p - (const uint8_t *)buf), 0);
+ computed_chksum = H5_checksum_metadata((const uint8_t *)_image, (size_t)(image - (const uint8_t *)_image), 0);
/* Metadata checksum */
- UINT32DECODE(p, stored_chksum);
+ UINT32DECODE(image, stored_chksum);
/* Sanity check */
- HDassert((size_t)(p - (const uint8_t *)buf) == iblock->size);
+ HDassert((size_t)(image - (const uint8_t *)_image) == iblock->size);
/* Verify checksum */
if(stored_chksum != computed_chksum)
@@ -915,7 +1045,7 @@ H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Check if we have any indirect block children */
if(iblock->nrows > hdr->man_dtable.max_direct_rows) {
- unsigned indir_rows; /* Number of indirect rows in this indirect block */
+ unsigned indir_rows;/* Number of indirect rows in this indirect block */
/* Compute the number of indirect rows for this indirect block */
indir_rows = iblock->nrows - hdr->man_dtable.max_direct_rows;
@@ -928,343 +1058,323 @@ H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
iblock->child_iblocks = NULL;
/* Set return value */
- ret_value = iblock;
+ ret_value = (void *)iblock;
done:
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- HDONE_ERROR(H5E_HEAP, H5E_CLOSEERROR, NULL, "can't close wrapped buffer")
if(!ret_value && iblock)
if(H5HF_man_iblock_dest(iblock) < 0)
HDONE_ERROR(H5E_HEAP, H5E_CANTFREE, NULL, "unable to destroy fractal heap indirect block")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5HF_cache_iblock_load() */
+} /* end H5HF__cache_iblock_deserialize() */
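
One detail worth calling out in the deserialize above: the block offset is read with UINT64DECODE_VAR(), i.e. an integer stored in only hdr->heap_off_size bytes rather than a full eight. The standalone sketch below decodes such a variable-width little-endian value and advances the image pointer past it; it reflects my reading of what the macro does, and the toy_ name is invented.

#include <stdint.h>
#include <stddef.h>

/* Decode an unsigned integer stored little-endian in 'width' bytes (1..8),
 * advancing *pp past the field. */
static uint64_t
toy_decode_var(const uint8_t **pp, unsigned width)
{
    const uint8_t *p   = *pp;
    uint64_t       val = 0;
    unsigned       u;

    for(u = 0; u < width; u++)
        val |= (uint64_t)p[u] << (8 * u);

    *pp += width;
    return val;
}

/* Usage: with image pointing at the bytes 45 23 01 and width 3,
 * toy_decode_var(&image, 3) returns 0x012345 and advances image by 3 bytes. */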
/*-------------------------------------------------------------------------
- * Function: H5HF_cache_iblock_flush
+ * Function: H5HF__cache_iblock_image_len
*
- * Purpose: Flushes a dirty fractal heap indirect block to disk.
+ * Purpose: Return the size of the on disk image of the iblock.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Mar 6 2006
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5HF_cache_iblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5HF_indirect_t *iblock, unsigned H5_ATTR_UNUSED * flags_ptr)
+static herr_t
+H5HF__cache_iblock_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- H5WB_t *wb = NULL; /* Wrapped buffer for indirect block data */
- uint8_t iblock_buf[H5HF_IBLOCK_BUF_SIZE]; /* Buffer for indirect block */
- herr_t ret_value = SUCCEED; /* Return value */
+ const H5HF_indirect_t *iblock = (const H5HF_indirect_t *)_thing; /* Indirect block info */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC_NOERR
- /* check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ /* Sanity checks */
HDassert(iblock);
+ HDassert(iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(iblock->cache_info.type == H5AC_FHEAP_IBLOCK);
+ HDassert(image_len);
- if(iblock->cache_info.is_dirty) {
- H5HF_hdr_t *hdr; /* Shared fractal heap information */
- uint8_t *buf; /* Temporary buffer */
- uint8_t *p; /* Pointer into raw data buffer */
-#ifndef NDEBUG
- unsigned nchildren = 0; /* Track # of children */
- unsigned max_child = 0; /* Track max. child entry used */
-#endif /* NDEBUG */
- uint32_t metadata_chksum; /* Computed metadata checksum value */
- size_t u; /* Local index variable */
-
-#ifndef NDEBUG
-{
- /* Verify that flush dependencies are working correctly. Do this
- * by verifying that all children of this iblock are clean.
- */
- hbool_t descendants_clean = TRUE;
- unsigned iblock_status;
-
- if(H5AC_get_entry_status(f, iblock->addr, &iblock_status) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get iblock status")
-
- /* since the current iblock is the guest of honor in a flush, we know
- * that it is locked into the cache for the duration of the call. Hence
- * there is no need to check to see if it is pinned or protected, or to
- * protect it if it is not.
- */
- if(H5HF__cache_verify_iblock_descendants_clean(f, dxpl_id, iblock, &iblock_status, &descendants_clean) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify descendants clean.")
-
- HDassert(descendants_clean);
-}
-#endif /* NDEBUG */
-
- /* Get the pointer to the shared heap header */
- hdr = iblock->hdr;
-
- /* Set the shared heap header's file context for this operation */
- hdr->f = f;
-
- /* Wrap the local buffer for serialized indirect block */
- if(NULL == (wb = H5WB_wrap(iblock_buf, sizeof(iblock_buf))))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, FAIL, "can't wrap buffer")
-
- /* Get a pointer to a buffer that's large enough for serialized indirect block */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, iblock->size)))
- HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "can't get actual buffer")
-
- /* Get temporary pointer to buffer for serialized indirect block */
- p = buf;
-
- /* Magic number */
- HDmemcpy(p, H5HF_IBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC);
- p += H5_SIZEOF_MAGIC;
-
- /* Version # */
- *p++ = H5HF_IBLOCK_VERSION;
-
- /* Address of heap header for heap which owns this block */
- H5F_addr_encode(f, &p, hdr->heap_addr);
-
- /* Offset of block in heap */
- UINT64ENCODE_VAR(p, iblock->block_off, hdr->heap_off_size);
-
- /* Encode indirect block-specific fields */
- for(u = 0; u < (iblock->nrows * hdr->man_dtable.cparam.width); u++) {
- /* Encode child block address */
- H5F_addr_encode(f, &p, iblock->ents[u].addr);
+ *image_len = iblock->size;
- /* Check for heap with I/O filters */
- if(hdr->filter_len > 0) {
- /* Sanity check */
- HDassert(iblock->filt_ents);
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5HF__cache_iblock_image_len() */
- /* Encode extra information for direct blocks */
- if(u < (hdr->man_dtable.max_direct_rows * hdr->man_dtable.cparam.width)) {
- /* Sanity check */
- /* (either both the address & size are defined or both are
- * not defined)
- */
- HDassert((H5F_addr_defined(iblock->ents[u].addr) && iblock->filt_ents[u].size)
- || (!H5F_addr_defined(iblock->ents[u].addr) && iblock->filt_ents[u].size == 0));
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF__cache_iblock_pre_serialize
+ *
+ * Purpose: The primary objective of this function is to determine if the
+ * indirect block is currently allocated in temporary file space,
+ * and if so, to move it to real file space before the entry is
+ * serialized.
+ *
+ * In debug compiles, this function also verifies that all children
+ * of this indirect block are either clean or are not in cache.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 6/21/14
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF__cache_iblock_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *_thing,
+ haddr_t addr, size_t H5_ATTR_UNUSED len, size_t H5_ATTR_UNUSED compressed_len,
+ haddr_t *new_addr, size_t H5_ATTR_UNUSED *new_len,
+ size_t H5_ATTR_UNUSED *new_compressed_len, unsigned *flags)
+{
+ H5HF_hdr_t *hdr; /* Shared fractal heap information */
+ H5HF_indirect_t *iblock = (H5HF_indirect_t *)_thing; /* Indirect block info */
+ herr_t ret_value = SUCCEED; /* Return value */
- /* Size of filtered direct block */
- H5F_ENCODE_LENGTH(f, p, iblock->filt_ents[u].size);
+ FUNC_ENTER_STATIC
- /* I/O filter mask for filtered direct block */
- UINT32ENCODE(p, iblock->filt_ents[u].filter_mask);
- } /* end if */
- } /* end if */
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(iblock);
+ HDassert(iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(iblock->cache_info.type == H5AC_FHEAP_IBLOCK);
+ HDassert(iblock->cache_info.size == iblock->size);
+ HDassert(H5F_addr_defined(addr));
+ HDassert(H5F_addr_eq(iblock->addr, addr));
+ HDassert(new_addr);
+ HDassert(new_len);
+ HDassert(flags);
+ hdr = iblock->hdr;
+ HDassert(hdr);
+ HDassert(hdr->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(hdr->cache_info.type == H5AC_FHEAP_HDR);
#ifndef NDEBUG
- /* Count child blocks */
- if(H5F_addr_defined(iblock->ents[u].addr)) {
- nchildren++;
- if(u > max_child)
- max_child = u;
- } /* end if */
-#endif /* NDEBUG */
- } /* end for */
-
- /* Compute checksum */
- metadata_chksum = H5_checksum_metadata(buf, (size_t)(p - buf), 0);
+{
+ hbool_t descendants_clean = TRUE;
+ unsigned iblock_status = 0;
- /* Metadata checksum */
- UINT32ENCODE(p, metadata_chksum);
+ /* verify that flush dependencies are working correctly. Do this
+ * by verifying that all children of this iblock are clean.
+ */
+ if(H5AC_get_entry_status(f, iblock->addr, &iblock_status) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get iblock status")
- /* Sanity check */
- HDassert((size_t)(p - buf) == iblock->size);
-#ifndef NDEBUG
- HDassert(nchildren == iblock->nchildren);
- HDassert(max_child == iblock->max_child);
+ /* since the current iblock is the guest of honor in a flush, we know
+ * that it is locked into the cache for the duration of the call. Hence
+ * there is no need to check to see if it is pinned or protected, or to
+ * protect it if it is not.
+ */
+ if(H5HF__cache_verify_iblock_descendants_clean((H5F_t *)f, dxpl_id, iblock, &iblock_status, &descendants_clean) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify descendants clean.")
+ HDassert(descendants_clean);
+}
#endif /* NDEBUG */
- /* Check for needing to re-allocate indirect block from 'temp.' to 'normal' file space */
- if(H5F_IS_TMP_ADDR(f, addr)) {
- /* Sanity check */
- HDassert(H5F_addr_eq(iblock->addr, addr));
+ /* Check to see if we must re-allocate the iblock from temporary to
+ * normal (AKA real) file space.
+ */
+ if(H5F_IS_TMP_ADDR(f, addr)) {
+ haddr_t iblock_addr;
- /* Allocate 'normal' space for the new indirect block on disk */
- if(HADDR_UNDEF == (addr = H5MF_alloc(f, H5FD_MEM_FHEAP_IBLOCK, dxpl_id, (hsize_t)iblock->size)))
- HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "file allocation failed for fractal heap indirect block")
+ /* Allocate 'normal' space for the new indirect block on disk */
+ if(HADDR_UNDEF == (iblock_addr = H5MF_alloc((H5F_t *)f, H5FD_MEM_FHEAP_IBLOCK, dxpl_id, (hsize_t)iblock->size)))
+ HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "file allocation failed for fractal heap indirect block")
- /* Sanity check */
- HDassert(!H5F_addr_eq(iblock->addr, addr));
+ /* Sanity check */
+ HDassert(!H5F_addr_eq(iblock->addr, iblock_addr));
- /* Let the metadata cache know the block moved */
- if(H5AC_move_entry(f, H5AC_FHEAP_IBLOCK, iblock->addr, addr) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTMOVE, FAIL, "unable to move indirect block")
+ /* Let the metadata cache know the block moved */
+ if(H5AC_move_entry((H5F_t *)f, H5AC_FHEAP_IBLOCK, iblock->addr, iblock_addr) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTMOVE, FAIL, "unable to move indirect block")
- /* Update the internal address for the block */
- iblock->addr = addr;
+ /* Update the internal address for the block */
+ iblock->addr = iblock_addr;
- /* Check for root indirect block */
- if(NULL == iblock->parent) {
- /* Update information about indirect block's location */
- hdr->man_dtable.table_addr = addr;
+ /* Check for root indirect block */
+ if(NULL == iblock->parent) {
+ /* Update information about indirect block's location */
+ hdr->man_dtable.table_addr = iblock_addr;
- /* Mark that heap header was modified */
- if(H5HF_hdr_dirty(hdr) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTDIRTY, FAIL, "can't mark heap header as dirty")
- } /* end if */
- else {
- H5HF_indirect_t *par_iblock; /* Parent indirect block */
- unsigned par_entry; /* Entry in parent indirect block */
+ /* Mark that heap header was modified */
+ if(H5HF_hdr_dirty(hdr) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDIRTY, FAIL, "can't mark heap header as dirty")
+ } /* end if */
+ else {
+ H5HF_indirect_t *par_iblock; /* Parent indirect block */
+ unsigned par_entry; /* Entry in parent indirect block */
- /* Get parent information */
- par_iblock = iblock->parent;
- par_entry = iblock->par_entry;
+ /* Get parent information */
+ par_iblock = iblock->parent;
+ par_entry = iblock->par_entry;
- /* Update information about indirect block's location */
- par_iblock->ents[par_entry].addr = addr;
+ /* Update information about indirect block's location */
+ par_iblock->ents[par_entry].addr = iblock_addr;
- /* Mark that parent was modified */
- if(H5HF_iblock_dirty(par_iblock) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTDIRTY, FAIL, "can't mark heap header as dirty")
- } /* end if */
+ /* Mark that parent was modified */
+ if(H5HF_iblock_dirty(par_iblock) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDIRTY, FAIL, "can't mark heap header as dirty")
} /* end if */
- /* Indirect block must be in 'normal' file space now */
- HDassert(!H5F_IS_TMP_ADDR(f, addr));
-
- /* Write the indirect block */
- if(H5F_block_write(f, H5FD_MEM_FHEAP_IBLOCK, addr, iblock->size, dxpl_id, buf) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFLUSH, FAIL, "unable to save fractal heap indirect block to disk")
-
- /* Reset dirty flags */
- iblock->cache_info.is_dirty = FALSE;
+ *new_addr = iblock_addr;
+ *flags = H5C__SERIALIZE_MOVED_FLAG;
} /* end if */
-
- if(destroy)
- if(H5HF_cache_iblock_dest(f, iblock) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to destroy fractal heap indirect block")
+ else
+ *flags = 0;
done:
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- HDONE_ERROR(H5E_HEAP, H5E_CLOSEERROR, FAIL, "can't close wrapped buffer")
-
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5HF_cache_iblock_flush() */
+} /* end H5HF__cache_iblock_pre_serialize() */
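
/*
 * Illustrative sketch (hedged, not part of the patch above and not HDF5
 * code): the pre-serialize callback relocates an entry that still lives at
 * a temporary address, updates the parent's record of the child address,
 * and reports the move back to the cache via *new_addr plus a "moved"
 * flag (H5C__SERIALIZE_MOVED_FLAG in the real code).  The toy program
 * below mirrors only that shape; addr_t, node_t, ADDR_IS_TMP(),
 * alloc_real_addr() and the moved out-parameter are hypothetical
 * stand-ins for H5F_IS_TMP_ADDR(), H5MF_alloc() and H5AC_move_entry().
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t addr_t;
#define ADDR_IS_TMP(a)  ((a) & UINT64_C(0x8000000000000000))

typedef struct node {
    addr_t        addr;          /* current (possibly temporary) address */
    struct node  *parent;        /* NULL for the root node               */
    unsigned      par_entry;     /* slot in parent->child_addr[]         */
    addr_t        child_addr[4]; /* toy analogue of iblock->ents[]       */
} node_t;

static addr_t next_real = 0x2000;  /* toy allocator state */
static addr_t alloc_real_addr(void) { addr_t a = next_real; next_real += 0x100; return a; }

/* Move a node from temporary to real space, patch the parent's record,
 * and tell the caller where the node went and that it moved.
 */
static int toy_pre_serialize(node_t *n, addr_t *new_addr, unsigned *moved)
{
    *moved = 0;
    if(ADDR_IS_TMP(n->addr)) {
        addr_t real = alloc_real_addr();          /* like H5MF_alloc()     */
        if(n->parent)                             /* like ents[].addr fix  */
            n->parent->child_addr[n->par_entry] = real;
        n->addr = real;
        *new_addr = real;                         /* like *new_addr + flag */
        *moved = 1;
    }
    return 0;
}

int main(void)
{
    node_t root  = { 0x1000, NULL, 0, {0} };
    node_t child = { UINT64_C(0x8000000000000001), &root, 2, {0} };
    addr_t new_addr = 0;
    unsigned moved = 0;

    root.child_addr[2] = child.addr;
    toy_pre_serialize(&child, &new_addr, &moved);
    assert(moved && root.child_addr[2] == new_addr);
    printf("relocated to 0x%llx\n", (unsigned long long)new_addr);
    return 0;
}
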
/*-------------------------------------------------------------------------
- * Function: H5HF_cache_iblock_dest
+ * Function: H5HF__cache_iblock_serialize
*
- * Purpose: Destroys a fractal heap indirect block in memory.
+ * Purpose: Given a pointer to an iblock, and a pointer to a buffer of
+ * the appropriate size, write the contents of the iblock to the
+ * buffer in format appropriate for writing to disk.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Mar 6 2006
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5HF_cache_iblock_dest(H5F_t *f, H5HF_indirect_t *iblock)
+static herr_t
+H5HF__cache_iblock_serialize(const H5F_t *f, void *_image, size_t len,
+ void *_thing)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ H5HF_hdr_t *hdr; /* Shared fractal heap information */
+ H5HF_indirect_t *iblock = (H5HF_indirect_t *)_thing; /* Indirect block info */
+ uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */
+#ifndef NDEBUG
+ unsigned nchildren = 0; /* Track # of children */
+ size_t max_child = 0; /* Track max. child entry used */
+#endif /* NDEBUG */
+ uint32_t metadata_chksum; /* Computed metadata checksum value */
+ size_t u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC_NOERR
- /*
- * Check arguments.
- */
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(image);
HDassert(iblock);
- HDassert(iblock->rc == 0);
- HDassert(iblock->hdr);
+ HDassert(iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(iblock->cache_info.type == H5AC_FHEAP_IBLOCK);
+ HDassert(iblock->cache_info.size == iblock->size);
+ HDassert(len == iblock->size);
- /* If we're going to free the space on disk, the address must be valid */
- HDassert(!iblock->cache_info.free_file_space_on_destroy || H5F_addr_defined(iblock->cache_info.addr));
-
- /* Check for freeing file space for indirect block */
- if(iblock->cache_info.free_file_space_on_destroy) {
- /* Check if the indirect block is NOT currently allocated in temp. file space */
- /* (temp. file space does not need to be freed) */
- if(!H5F_IS_TMP_ADDR(f, iblock->cache_info.addr)) {
- /* Release the space on disk */
- /* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_FHEAP_IBLOCK, H5AC_dxpl_id, iblock->cache_info.addr, (hsize_t)iblock->size) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to free fractal heap indirect block")
- } /* end if */
- } /* end if */
+ /* Indirect block must be in 'normal' file space */
+ HDassert(!H5F_IS_TMP_ADDR(f, iblock->addr));
+ HDassert(H5F_addr_eq(iblock->addr, iblock->cache_info.addr));
- /* Destroy fractal heap indirect block */
- if(H5HF_man_iblock_dest(iblock) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to destroy fractal heap indirect block")
+ /* Get the pointer to the shared heap header */
+ hdr = iblock->hdr;
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5HF_cache_iblock_dest() */
+ /* Set the shared heap header's file context for this operation */
+ hdr->f = f;
-
-/*-------------------------------------------------------------------------
- * Function: H5HF_cache_iblock_clear
- *
- * Purpose: Mark a fractal heap indirect block in memory as non-dirty.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Mar 6 2006
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5HF_cache_iblock_clear(H5F_t *f, H5HF_indirect_t *iblock, hbool_t destroy)
-{
- herr_t ret_value = SUCCEED; /* Return value */
+ /* Magic number */
+ HDmemcpy(image, H5HF_IBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC);
+ image += H5_SIZEOF_MAGIC;
- FUNC_ENTER_NOAPI_NOINIT
+ /* Version # */
+ *image++ = H5HF_IBLOCK_VERSION;
- /*
- * Check arguments.
- */
- HDassert(iblock);
+ /* Address of heap header for heap which owns this block */
+ H5F_addr_encode(f, &image, hdr->heap_addr);
- /* Reset the dirty flag. */
- iblock->cache_info.is_dirty = FALSE;
+ /* Offset of block in heap */
+ UINT64ENCODE_VAR(image, iblock->block_off, hdr->heap_off_size);
- if(destroy)
- if(H5HF_cache_iblock_dest(f, iblock) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to destroy fractal heap indirect block")
+ /* Encode indirect block-specific fields */
+ for(u = 0; u < (iblock->nrows * hdr->man_dtable.cparam.width); u++) {
+ /* Encode child block address */
+ H5F_addr_encode(f, &image, iblock->ents[u].addr);
+
+ /* Check for heap with I/O filters */
+ if(hdr->filter_len > 0) {
+ /* Sanity check */
+ HDassert(iblock->filt_ents);
+
+ /* Encode extra information for direct blocks */
+ if(u < (hdr->man_dtable.max_direct_rows * hdr->man_dtable.cparam.width)) {
+ /* Sanity check */
+ /* (either both the address & size are defined or both are
+ * not defined)
+ */
+ HDassert((H5F_addr_defined(iblock->ents[u].addr) && iblock->filt_ents[u].size)
+ || (!H5F_addr_defined(iblock->ents[u].addr) && iblock->filt_ents[u].size == 0));
+
+ /* Size of filtered direct block */
+ H5F_ENCODE_LENGTH(f, image, iblock->filt_ents[u].size);
+
+ /* I/O filter mask for filtered direct block */
+ UINT32ENCODE(image, iblock->filt_ents[u].filter_mask);
+ } /* end if */
+ } /* end if */
+
+#ifndef NDEBUG
+ /* Count child blocks */
+ if(H5F_addr_defined(iblock->ents[u].addr)) {
+ nchildren++;
+ if(u > max_child)
+ max_child = u;
+ } /* end if */
+#endif /* NDEBUG */
+ } /* end for */
+
+ /* Compute checksum */
+ metadata_chksum = H5_checksum_metadata((uint8_t *)_image, (size_t)(image - (uint8_t *)_image), 0);
+
+ /* Metadata checksum */
+ UINT32ENCODE(image, metadata_chksum);
+
+ /* Sanity checks */
+ HDassert((size_t)(image - (uint8_t *)_image) == iblock->size);
+#ifndef NDEBUG
+ HDassert(nchildren == iblock->nchildren);
+ HDassert(max_child == iblock->max_child);
+#endif /* NDEBUG */
-done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5HF_cache_iblock_clear() */
+} /* end H5HF__cache_iblock_serialize() */
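
/*
 * Illustrative sketch (hedged, not part of the patch above): the serialize
 * callbacks in this file write every field first and then store a checksum
 * computed with the checksum field cleared; the matching deserialize path
 * extracts the stored value, zeroes the field and recomputes before
 * comparing.  The snippet below shows that encode/verify round trip with a
 * toy additive checksum standing in for H5_checksum_metadata(); the buffer
 * layout and names are hypothetical.
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Toy stand-in for H5_checksum_metadata(): not the real algorithm */
static uint32_t toy_checksum(const uint8_t *buf, size_t len)
{
    uint32_t sum = 0;
    for(size_t i = 0; i < len; i++)
        sum = (sum << 1) ^ buf[i];
    return sum;
}

int main(void)
{
    uint8_t image[16] = "payload-bytes";    /* body bytes + padding       */
    size_t  chk_off   = sizeof(image) - 4;  /* checksum stored at the end */

    /* encode: clear the checksum field, checksum the whole image,
     * then store the result (cf. HDmemset + UINT32ENCODE)
     */
    memset(image + chk_off, 0, 4);
    uint32_t chk = toy_checksum(image, sizeof(image));
    memcpy(image + chk_off, &chk, 4);

    /* verify: extract the stored value, zero the field, recompute */
    uint32_t stored;
    memcpy(&stored, image + chk_off, 4);
    memset(image + chk_off, 0, 4);
    assert(stored == toy_checksum(image, sizeof(image)));
    return 0;
}
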
/*-------------------------------------------------------------------------
- * Function: H5HF_cache_iblock_notify
+ * Function: H5HF__cache_iblock_notify
*
- * Purpose: Setup / takedown flush dependencies as indirect blocks
+ * Purpose: This function is used to create and destroy flush dependency
+ * relationships between iblocks and their parents as indirect blocks
* are loaded / inserted and evicted from the metadata cache.
*
- * Return: Non-negative on success/Negative on failure
+ * In general, the parent will be another iblock, but it may be the
+ * header if the iblock in question is the root iblock.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
* Programmer: John Mainzer
- * 5/17/14
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5HF_cache_iblock_notify(H5C_notify_action_t action, H5HF_indirect_t *iblock)
+static herr_t
+H5HF__cache_iblock_notify(H5C_notify_action_t action, void *_thing)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ H5HF_indirect_t *iblock = (H5HF_indirect_t *)_thing; /* Indirect block info */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
- /*
- * Check arguments.
- */
+ /* Sanity checks */
HDassert(iblock);
HDassert(iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(iblock->cache_info.type == H5AC_FHEAP_IBLOCK);
HDassert(iblock->hdr);
if(action == H5AC_NOTIFY_ACTION_BEFORE_EVICT)
@@ -1309,6 +1419,7 @@ H5HF_cache_iblock_notify(H5C_notify_action_t action, H5HF_indirect_t *iblock)
switch(action) {
case H5AC_NOTIFY_ACTION_AFTER_INSERT:
+ case H5AC_NOTIFY_ACTION_AFTER_LOAD:
if(iblock->parent) { /* this is a child iblock */
/* create flush dependency with parent iblock */
if(H5AC_create_flush_dependency(iblock->parent, iblock) < 0)
@@ -1321,6 +1432,10 @@ H5HF_cache_iblock_notify(H5C_notify_action_t action, H5HF_indirect_t *iblock)
} /* end else */
break;
+ case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
+ /* do nothing */
+ break;
+
case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
if(iblock->fd_parent) { /* this is a child iblock */
/* destroy flush dependency with parent iblock */
@@ -1341,38 +1456,104 @@ H5HF_cache_iblock_notify(H5C_notify_action_t action, H5HF_indirect_t *iblock)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5HF_cache_iblock_notify() */
+} /* end H5HF__cache_iblock_notify() */
/*-------------------------------------------------------------------------
- * Function: H5HF_cache_iblock_size
+ * Function: H5HF__cache_iblock_free_icr
*
- * Purpose: Compute the size in bytes of a fractal heap indirect block
- * on disk, and return it in *size_ptr. On failure,
- * the value of *size_ptr is undefined.
+ * Purpose: Unlink the supplied instance of H5HF_indirect_t from the
+ * fractal heap and free its memory.
*
- * Return: Non-negative on success/Negative on failure
+ * Note: The metadata cache sets the object's cache_info.magic to
+ * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr
+ * callback (checked in assert).
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Mar 6 2006
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5HF_cache_iblock_size(const H5F_t H5_ATTR_UNUSED *f, const H5HF_indirect_t *iblock, size_t *size_ptr)
+static herr_t
+H5HF__cache_iblock_free_icr(void *thing)
{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ H5HF_indirect_t *iblock = (H5HF_indirect_t *)thing; /* Fractal heap indirect block to free */
+ herr_t ret_value = SUCCEED; /* Return value */
- /* Check arguments */
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
HDassert(iblock);
- HDassert(size_ptr);
+ HDassert(iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC);
+ HDassert(iblock->cache_info.type == H5AC_FHEAP_IBLOCK);
+ HDassert(iblock->rc == 0);
+ HDassert(iblock->hdr);
+
+ /* Destroy fractal heap indirect block */
+ if(H5HF_man_iblock_dest(iblock) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to destroy fractal heap indirect block")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5HF__cache_iblock_free_icr() */
+
+/*********************************************************/
+/* metadata cache callback definitions for direct blocks */
+/*********************************************************/
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF__cache_dblock_get_load_size
+ *
+ * Purpose: Determine the size of the direct block's on disk image, and
+ * return it in *image_len.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 6/21/14
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF__cache_dblock_get_load_size(const void *_udata, size_t *image_len)
+{
+ const H5HF_dblock_cache_ud_t *udata = (const H5HF_dblock_cache_ud_t *)_udata; /* User data for callback */
+ const H5HF_parent_t *par_info; /* Pointer to parent information */
+ const H5HF_hdr_t *hdr; /* Shared fractal heap information */
+ size_t size;
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(udata);
+ HDassert(image_len);
+ par_info = (const H5HF_parent_t *)(&(udata->par_info));
+ HDassert(par_info);
+ hdr = par_info->hdr;
+ HDassert(hdr);
+ HDassert(hdr->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(hdr->cache_info.type == H5AC_FHEAP_HDR);
+
+ /* Check for I/O filters on this heap */
+ if(hdr->filter_len > 0) {
+ /* Check for root direct block */
+ if(par_info->iblock == NULL)
+ size = hdr->pline_root_direct_size;
+ else
+ size = par_info->iblock->filt_ents[par_info->entry].size;
+ } /* end if */
+ else
+ size = udata->dblock_size;
- /* Set size value */
- *size_ptr = iblock->size;
+ *image_len = size;
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5HF_cache_iblock_size() */
+} /* end H5HF__cache_dblock_get_load_size() */

/*********************************************************/
/* metadata cache callback definitions for direct blocks */
@@ -1380,49 +1561,55 @@ H5HF_cache_iblock_size(const H5F_t H5_ATTR_UNUSED *f, const H5HF_indirect_t *ibl
/*-------------------------------------------------------------------------
- * Function: H5HF_cache_dblock_load
+ * Function: H5HF__cache_dblock_deserialize
*
- * Purpose: Loads a fractal heap direct block from the disk.
+ * Purpose: Given a buffer containing the on disk image of a direct
+ * block, allocate an instance of H5HF_direct_t, load the data
+ * in the buffer into this new instance, and return a pointer to
+ * it.
*
- * Return: Success: Pointer to a new fractal heap direct block
+ * As best I can tell, the size of the direct block image is fully
+ * known before the image is loaded, so this function should succeed
+ * unless the image is corrupt or memory allocation fails.
*
+ * Return: Success: Pointer to in core representation
* Failure: NULL
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Feb 27 2006
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
-static H5HF_direct_t *
-H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
+static void *
+H5HF__cache_dblock_deserialize(const void *_image, size_t len, void *_udata,
+ hbool_t H5_ATTR_UNUSED *dirty)
{
H5HF_hdr_t *hdr; /* Shared fractal heap information */
H5HF_dblock_cache_ud_t *udata = (H5HF_dblock_cache_ud_t *)_udata; /* User data for callback */
H5HF_parent_t *par_info; /* Pointer to parent information */
- H5HF_direct_t *dblock = NULL; /* Direct block info */
- const uint8_t *p; /* Pointer into raw data buffer */
+ H5HF_direct_t *dblock = NULL; /* Direct block info */
+ const uint8_t *image; /* Pointer into raw data buffer */
haddr_t heap_addr; /* Address of heap header in the file */
- H5HF_direct_t *ret_value; /* Return value */
+ void * ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
- /* Check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
- HDassert(udata != NULL);
- HDassert(udata->f != NULL);
- HDassert(udata->dblock_size > 0);
+ /* Sanity checks */
+ HDassert(_image);
+ HDassert(udata);
+ par_info = (H5HF_parent_t *)(&(udata->par_info));
+ HDassert(par_info);
+ hdr = par_info->hdr;
+ HDassert(hdr);
+ HDassert(hdr->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(hdr->cache_info.type == H5AC_FHEAP_HDR);
+ HDassert(dirty);
/* Allocate space for the fractal heap direct block */
if(NULL == (dblock = H5FL_MALLOC(H5HF_direct_t)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
HDmemset(&dblock->cache_info, 0, sizeof(H5AC_info_t));
- /* Get the pointer to the shared heap header */
- par_info = (H5HF_parent_t *)(&(udata->par_info));
- hdr = par_info->hdr;
-
/* Set the shared heap header's file context for this operation */
hdr->f = udata->f;
@@ -1435,6 +1622,10 @@ H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
dblock->size = udata->dblock_size;
dblock->file_size = 0;
+ /* initialize fields used in serialization */
+ dblock->write_buf = NULL;
+ dblock->write_size = 0;
+
/* Allocate block buffer */
/* XXX: Change to using free-list factories */
if(NULL == (dblock->blk = H5FL_BLK_MALLOC(direct_block, (size_t)dblock->size)))
@@ -1449,28 +1640,21 @@ H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
unsigned filter_mask; /* Excluded filters for direct block */
/* Check for root direct block */
- if(par_info->iblock == NULL) {
- /* Sanity check */
- HDassert(H5F_addr_eq(hdr->man_dtable.table_addr, addr));
-
+ if(par_info->iblock == NULL)
/* Set up parameters to read filtered direct block */
read_size = hdr->pline_root_direct_size;
- } /* end if */
- else {
- /* Sanity check */
- HDassert(H5F_addr_eq(par_info->iblock->ents[par_info->entry].addr, addr));
-
+ else
/* Set up parameters to read filtered direct block */
read_size = par_info->iblock->filt_ents[par_info->entry].size;
- } /* end else */
+ HDassert(len == read_size);
- /* Allocate buffer to perform I/O filtering on */
+ /* Allocate buffer to perform I/O filtering on and copy image into
+ * it. Must do this as H5Z_pipeline() may re-size the buffer
+ * provided to it.
+ */
if(NULL == (read_buf = H5MM_malloc(read_size)))
HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, NULL, "memory allocation failed for pipeline buffer")
-
- /* Read filtered direct block from disk */
- if(H5F_block_read(f, H5FD_MEM_FHEAP_DBLOCK, addr, read_size, dxpl_id, read_buf) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_READERROR, NULL, "can't read fractal heap direct block")
+ HDmemcpy(read_buf, _image, len);
/* Push direct block data through I/O filter pipeline */
nbytes = read_size;
@@ -1488,25 +1672,25 @@ H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
H5MM_xfree(read_buf);
} /* end if */
else {
- /* Read direct block from disk */
- if(H5F_block_read(f, H5FD_MEM_FHEAP_DBLOCK, addr, dblock->size, dxpl_id, dblock->blk) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_READERROR, NULL, "can't read fractal heap direct block")
+ /* copy image to dblock->blk */
+ HDassert(dblock->size == len);
+ HDmemcpy(dblock->blk, _image, dblock->size);
} /* end else */
/* Start decoding direct block */
- p = dblock->blk;
+ image = dblock->blk;
/* Magic number */
- if(HDmemcmp(p, H5HF_DBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "wrong fractal heap direct block signature")
- p += H5_SIZEOF_MAGIC;
+ if(HDmemcmp(image, H5HF_DBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "wrong fractal heap direct block signature")
+ image += H5_SIZEOF_MAGIC;
/* Version */
- if(*p++ != H5HF_DBLOCK_VERSION)
+ if(*image++ != H5HF_DBLOCK_VERSION)
HGOTO_ERROR(H5E_HEAP, H5E_VERSION, NULL, "wrong fractal heap direct block version")
/* Address of heap that owns this block (just for file integrity checks) */
- H5F_addr_decode(udata->f, &p, &heap_addr);
+ H5F_addr_decode(udata->f, &image, &heap_addr);
if(H5F_addr_ne(heap_addr, hdr->heap_addr))
HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "incorrect heap header address for direct block")
@@ -1521,7 +1705,7 @@ H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
} /* end if */
/* Offset of heap within the heap's address space */
- UINT64DECODE_VAR(p, dblock->block_off, hdr->heap_off_size);
+ UINT64DECODE_VAR(image, dblock->block_off, hdr->heap_off_size);
/* Decode checksum on direct block, if requested */
if(hdr->checksum_dblocks) {
@@ -1529,11 +1713,11 @@ H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
uint32_t computed_chksum; /* Computed metadata checksum value */
/* Metadata checksum */
- UINT32DECODE(p, stored_chksum);
+ UINT32DECODE(image, stored_chksum);
/* Reset checksum field, for computing the checksum */
/* (Casting away const OK - QAK) */
- HDmemset((uint8_t *)p - H5HF_SIZEOF_CHKSUM, 0, (size_t)H5HF_SIZEOF_CHKSUM);
+ HDmemset((uint8_t *)image - H5HF_SIZEOF_CHKSUM, 0, (size_t)H5HF_SIZEOF_CHKSUM);
/* Compute checksum on entire direct block */
computed_chksum = H5_checksum_metadata(dblock->blk, dblock->size, 0);
@@ -1544,10 +1728,10 @@ H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
} /* end if */
/* Sanity check */
- HDassert((size_t)(p - dblock->blk) == (size_t)H5HF_MAN_ABS_DIRECT_OVERHEAD(hdr));
+ HDassert((size_t)(image - dblock->blk) == (size_t)H5HF_MAN_ABS_DIRECT_OVERHEAD(hdr));
/* Set return value */
- ret_value = dblock;
+ ret_value = (void *)dblock;
done:
if(!ret_value && dblock)
@@ -1555,402 +1739,646 @@ done:
HDONE_ERROR(H5E_HEAP, H5E_CANTFREE, NULL, "unable to destroy fractal heap direct block")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5HF_cache_dblock_load() */
+} /* end H5HF__cache_dblock_deserialize() */
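
/*
 * Illustrative sketch (hedged, not part of the patch above): the
 * deserialize path copies the cache-supplied image into a freshly
 * malloc'd buffer before handing it to H5Z_pipeline(), because the
 * pipeline may realloc -- and therefore free -- the buffer it is given,
 * while the cache still owns the original image.  The toy expand_filter()
 * below stands in for such a pipeline stage; it is not an HDF5 API.
 */
#include <assert.h>
#include <stdlib.h>
#include <string.h>

/* Toy filter stage that may grow its input in place: it may realloc
 * *buf, so *buf must be heap memory owned by the caller.
 */
static int expand_filter(void **buf, size_t *len)
{
    size_t new_len = *len * 2;
    void *p = realloc(*buf, new_len);
    if(!p)
        return -1;
    memset((char *)p + *len, 0, new_len - *len);
    *buf = p;
    *len = new_len;
    return 0;
}

int main(void)
{
    const char image[8] = "rawdata";   /* cache-owned: must not be freed */
    size_t len = sizeof(image);

    /* copy into a private scratch buffer first (cf. H5MM_malloc + HDmemcpy) */
    void *scratch = malloc(len);
    assert(scratch);
    memcpy(scratch, image, len);

    /* the filter is now free to realloc the scratch buffer */
    assert(expand_filter(&scratch, &len) == 0);
    assert(len == 2 * sizeof(image));

    free(scratch);
    return 0;
}
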
/*-------------------------------------------------------------------------
- * Function: H5HF_cache_dblock_flush
+ * Function: H5HF__cache_dblock_image_len
*
- * Purpose: Flushes a dirty fractal heap direct block to disk.
+ * Purpose: Report the actual size of the direct block image on disk.
+ * Note that this value will probably be incorrect if compression
+ * is enabled and the entry is dirty.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Feb 27 2006
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5HF_cache_dblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5HF_direct_t *dblock, unsigned H5_ATTR_UNUSED * flags_ptr)
+static herr_t
+H5HF__cache_dblock_image_len(const void *_thing, size_t *image_len, hbool_t *compressed_ptr, size_t *compressed_image_len_ptr)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ const H5HF_direct_t *dblock = (const H5HF_direct_t *)_thing; /* Direct block info */
+ const H5HF_indirect_t *par_iblock; /* Parent iblock */
+ const H5HF_hdr_t *hdr; /* Shared fractal heap information */
+ hbool_t compressed;
+ size_t size;
+ size_t compressed_size;
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC_NOERR
- /* check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ /* Sanity checks */
HDassert(dblock);
+ HDassert(dblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(dblock->cache_info.type == H5AC_FHEAP_DBLOCK);
+ HDassert(image_len);
+ HDassert(compressed_ptr);
+ HDassert(compressed_image_len_ptr);
- if(dblock->cache_info.is_dirty) {
- H5HF_hdr_t *hdr; /* Shared fractal heap information */
- hbool_t at_tmp_addr = H5F_IS_TMP_ADDR(f, addr); /* Flag to indicate direct block is at temporary address */
- void *write_buf; /* Pointer to buffer to write out */
- size_t write_size; /* Size of buffer to write out */
- uint8_t *p; /* Pointer into raw data buffer */
+ /* Set up convenience variables */
+ hdr = dblock->hdr;
+ par_iblock = dblock->parent;
- /* Get the pointer to the shared heap header */
- hdr = dblock->hdr;
+ /* Check for I/O filters on this heap */
+ if(hdr->filter_len > 0) {
- /* Set the shared heap header's file context for this operation */
- hdr->f = f;
+ /* Filters are enabled, so set compressed to TRUE, and set
+ * size equal to the uncompressed size of the direct block.
+ * If the data is available, set compressed_size to the compressed
+ * size of the direct block -- otherwise set it equal to the
+ * uncompressed size.
+ *
+ * We have three possible scenarios here.
+ *
+ * First, the block may never have been flushed. In this
+ * case, both dblock->file_size and the size stored in the
+ * parent (either the header or the parent iblock) will all
+ * be zero. In this case, return the uncompressed size
+ * stored in dblock->size as the compressed size.
+ *
+ * Second, the block may have just been serialized, in which
+ * case, dblock->file_size should be zero, and the correct
+ * on disk size should be stored in the parent (again, either
+ * the header or the parent iblock as case may be).
+ *
+ * Third, we may be in the process of discarding this
+ * dblock without writing it. In this case, dblock->file_size
+ * should be non-zero and have the correct size. Note that
+ * in this case, the direct block will have been detached,
+ * and thus looking up the parent will likely return incorrect
+ * data.
+ */
+ size = dblock->size;
+ compressed = TRUE;
+ compressed_size = dblock->size; /* will overwrite if compressed
+ * size is available.
+ */
+
+ if(dblock->file_size != 0)
+ compressed_size = dblock->file_size;
+ else {
+ if(par_iblock) {
+ unsigned par_entry; /* Entry in parent indirect block */
- HDassert(dblock->blk);
- p = dblock->blk;
+ par_entry = dblock->par_entry;
+ compressed_size = par_iblock->filt_ents[par_entry].size;
- /* Magic number */
- HDmemcpy(p, H5HF_DBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC);
- p += H5_SIZEOF_MAGIC;
+ } /* end if */
+ else {
+ compressed_size = hdr->pline_root_direct_size;
+ }
- /* Version # */
- *p++ = H5HF_DBLOCK_VERSION;
+ if(compressed_size == 0)
+ compressed_size = dblock->size;
- /* Address of heap header for heap which owns this block */
- H5F_addr_encode(f, &p, hdr->heap_addr);
+ } /* end else */
+ } /* end if */
+ else {
+ size = dblock->size;
+ compressed = FALSE;
+ compressed_size = 0; /* a convenient, invalid value */
+ }
- /* Offset of block in heap */
- UINT64ENCODE_VAR(p, dblock->block_off, hdr->heap_off_size);
+ HDassert(size > 0);
- /* Metadata checksum */
- if(hdr->checksum_dblocks) {
- uint32_t metadata_chksum; /* Computed metadata checksum value */
+ *image_len = size;
+ *compressed_ptr = compressed;
+ *compressed_image_len_ptr = compressed_size;
- /* Clear the checksum field, to compute the checksum */
- HDmemset(p, 0, (size_t)H5HF_SIZEOF_CHKSUM);
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5HF__cache_dblock_image_len() */
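
/*
 * Illustrative sketch (hedged, not part of the patch above): the three
 * scenarios described in the comment inside the function boil down to a
 * fallback chain for the on-disk (compressed) size -- a non-zero
 * file_size wins, then whatever the parent recorded, then the
 * uncompressed size.  The condensed helper below restates that chain over
 * a hypothetical blk_info_t; it is not HDF5 code.
 */
#include <assert.h>
#include <stddef.h>

typedef struct {
    size_t size;        /* uncompressed size (always known)          */
    size_t file_size;   /* non-zero only when the block is detached  */
    size_t parent_size; /* parent's record; 0 until first serialized */
} blk_info_t;

/* Fallback chain: file_size, then the parent's record, then uncompressed */
static size_t on_disk_size(const blk_info_t *b)
{
    if(b->file_size != 0)
        return b->file_size;
    if(b->parent_size != 0)
        return b->parent_size;
    return b->size;
}

int main(void)
{
    blk_info_t never_flushed = { 4096, 0,    0    };
    blk_info_t just_written  = { 4096, 0,    1200 };
    blk_info_t being_dropped = { 4096, 1100, 1200 };

    assert(on_disk_size(&never_flushed) == 4096);
    assert(on_disk_size(&just_written)  == 1200);
    assert(on_disk_size(&being_dropped) == 1100);
    return 0;
}
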
- /* Compute checksum on entire direct block */
- metadata_chksum = H5_checksum_metadata(dblock->blk, dblock->size, 0);
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF__cache_dblock_pre_serialize
+ *
+ * Purpose: In principle, the purpose of this function is to determine
+ * the size and location of the disk image of the target direct
+ * block. In this case, the uncompressed size of the block is
+ * fixed, but since the H5C__CLASS_COMPRESSED_FLAG is set,
+ * we may need to compute and report the compressed size.
+ *
+ * This is a bit sticky in the case of a direct block when I/O
+ * filters are enabled, as the size of the compressed version
+ * of the on disk image is not known until the direct block has
+ * been run through the filters. Further, the location of the
+ * on disk image may change if the compressed size of the image
+ * changes as well.
+ *
+ * To complicate matters further, the direct block may have been
+ * initially allocated in temporary (AKA imaginary) file space.
+ * In this case, we must relocate the direct block's on disk
+ * image to real file space regardless of whether it has changed
+ * size.
+ *
+ * One simplifying factor is the direct block's "blk" field,
+ * which contains a pointer to a buffer which (with the exception
+ * of a small header) contains the on disk image in uncompressed
+ * form.
+ *
+ * To square this particular circle, this function does
+ * everything the serialize function usually does, with the
+ * exception of copying the image into the image buffer provided
+ * to the serialize function by the metadata cache. The data to
+ * copy is provided to the serialize function in a buffer pointed
+ * to by the write_buf field.
+ *
+ * If I/O filters are enabled, on exit,
+ * H5HF__cache_dblock_pre_serialize() sets the write_buf field to
+ * point to a buffer containing the filtered image of the direct
+ * block. The serialize function should free this block, and set
+ * the write_buf field to NULL after copying it into the image
+ * buffer provided by the metadata cache.
+ *
+ * If I/O filters are not enabled, this function prepares
+ * the buffer pointed to by the blk field for copying to the
+ * image buffer provided by the metadata cache, and sets the
+ * write_buf field equal to the blk field. In this case, the
+ * serialize function should simply set the write_buf field to
+ * NULL after copying the direct block image into the image
+ * buffer.
+ *
+ * In both of the above cases, the length of the buffer pointed
+ * to by write_buf is provided in the write_size field. This
+ * field must contain 0 on entry to this function, and should
+ * be set back to 0 at the end of the serialize function.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 6/21/14
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF__cache_dblock_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *_thing,
+ haddr_t addr, size_t len, size_t compressed_len, haddr_t *new_addr,
+ size_t H5_ATTR_UNUSED *new_len, size_t *new_compressed_len, unsigned *flags)
+{
+ hbool_t at_tmp_addr; /* Flag to indicate direct block is */
+ /* at temporary address */
+ haddr_t dblock_addr;
+ H5HF_hdr_t *hdr; /* Shared fractal heap information */
+ H5HF_direct_t *dblock = (H5HF_direct_t *)_thing; /* Direct block info */
+ H5HF_indirect_t *par_iblock; /* Parent indirect block */
+ unsigned par_entry; /* Entry in parent indirect block */
+ void *write_buf; /* Pointer to buffer to write out */
+ size_t write_size; /* Size of buffer to write out */
+ uint8_t *image; /* Pointer into raw data buffer */
+ unsigned dblock_flags = 0;
+ herr_t ret_value = SUCCEED; /* Return value */
- /* Metadata checksum */
- UINT32ENCODE(p, metadata_chksum);
- } /* end if */
+ FUNC_ENTER_STATIC
- /* Sanity check */
- HDassert((size_t)(p - dblock->blk) == (size_t)H5HF_MAN_ABS_DIRECT_OVERHEAD(hdr));
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(dblock);
+ HDassert(dblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(dblock->cache_info.type == H5AC_FHEAP_DBLOCK);
+ HDassert(dblock->write_buf == NULL);
+ HDassert(dblock->write_size == 0);
+ HDassert(dblock->cache_info.size == len);
+ HDassert(H5F_addr_defined(addr));
+ HDassert(len == dblock->size);
+ HDassert(new_addr);
+ HDassert(new_compressed_len);
+ HDassert(flags);
- /* Check for I/O filters on this heap */
- if(hdr->filter_len > 0) {
- H5Z_cb_t filter_cb = {NULL, NULL}; /* Filter callback structure */
- size_t nbytes; /* Number of bytes used */
- unsigned filter_mask = 0; /* Filter mask for block */
+ /* Set up local variables */
+ hdr = dblock->hdr;
+ dblock_addr = addr; /* will update dblock_addr if we move the block */
- /* Allocate buffer to perform I/O filtering on */
- write_size = dblock->size;
- if(NULL == (write_buf = H5MM_malloc(write_size)))
- HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "memory allocation failed for pipeline buffer")
- HDmemcpy(write_buf, dblock->blk, write_size);
+ /* dblock->size must match dblock->cache_info.size */
+ HDassert(dblock->cache_info.size == dblock->size);
- /* Push direct block data through I/O filter pipeline */
- nbytes = write_size;
- if(H5Z_pipeline(&(hdr->pline), 0, &filter_mask, H5Z_ENABLE_EDC, filter_cb, &nbytes, &write_size, &write_buf) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_WRITEERROR, FAIL, "output pipeline failed")
+ /* Set the shared heap header's file context for this operation */
+ hdr->f = (H5F_t *)f;
- /* Use the compressed number of bytes as the size to write */
- write_size = nbytes;
+ HDassert(hdr);
+ HDassert(hdr->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(hdr->cache_info.type == H5AC_FHEAP_HDR);
- /* Check for root direct block */
- if(dblock->parent == NULL) {
- hbool_t hdr_changed = FALSE; /* Whether the header information changed */
+ if(dblock->parent) {
+ /* this is the common case, in which the direct block is the child
+ * of an indirect block. Set up the convenience variables we will
+ * need if the address and/or compressed size of the on disk image
+ * of the direct block changes, and do some sanity checking in
+ * passing.
+ */
+ par_iblock = dblock->parent;
+ par_entry = dblock->par_entry;
- /* Sanity check */
- HDassert(H5F_addr_eq(hdr->man_dtable.table_addr, addr));
- HDassert(hdr->pline_root_direct_size > 0);
+ HDassert(par_iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(par_iblock->cache_info.type == H5AC_FHEAP_IBLOCK);
+ HDassert(H5F_addr_eq(par_iblock->ents[par_entry].addr, addr));
+ } /* end if */
+ else {
+ /* the direct block is a root direct block -- just set par_iblock
+ * to NULL, as the field will not be used.
+ */
+ par_iblock = NULL;
+ } /* end else */
- /* Check if the filter mask changed */
- if(hdr->pline_root_direct_filter_mask != filter_mask) {
- hdr->pline_root_direct_filter_mask = filter_mask;
- hdr_changed = TRUE;
- } /* end if */
+ at_tmp_addr = H5F_IS_TMP_ADDR(f, addr);
- /* Check if we need to re-size the block on disk */
- if(hdr->pline_root_direct_size != write_size || at_tmp_addr) {
- /* Check if the direct block is NOT currently allocated in temp. file space */
- /* (temp. file space does not need to be freed) */
- if(!at_tmp_addr) {
- /* Release direct block's current disk space */
- if(H5MF_xfree(f, H5FD_MEM_FHEAP_DBLOCK, dxpl_id, addr, (hsize_t)hdr->pline_root_direct_size) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to free fractal heap direct block")
- } /* end if */
+ /* Begin by prepping the direct block to be written to disk. Do
+ * this by writing the correct magic number, the dblock version,
+ * the address of the header, the offset of the block in the heap,
+ * and the checksum at the beginning of the block.
+ */
- /* Allocate space for the compressed direct block */
- if(HADDR_UNDEF == (addr = H5MF_alloc(f, H5FD_MEM_FHEAP_DBLOCK, dxpl_id, (hsize_t)write_size)))
- HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "file allocation failed for fractal heap direct block")
+ HDassert(dblock->blk);
+ image = dblock->blk;
- /* Let the metadata cache know, if the block moved */
- if(!H5F_addr_eq(hdr->man_dtable.table_addr, addr))
- if(H5AC_move_entry(f, H5AC_FHEAP_DBLOCK, hdr->man_dtable.table_addr, addr) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTMOVE, FAIL, "unable to move direct block")
+ /* Magic number */
+ HDmemcpy(image, H5HF_DBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC);
+ image += H5_SIZEOF_MAGIC;
- /* Update information about compressed direct block's location & size */
- hdr->man_dtable.table_addr = addr;
- hdr->pline_root_direct_size = write_size;
+ /* Version # */
+ *image++ = H5HF_DBLOCK_VERSION;
- /* Note that heap header was modified */
- hdr_changed = TRUE;
- } /* end if */
+ /* Address of heap header for heap which owns this block */
+ H5F_addr_encode(f, &image, hdr->heap_addr);
- /* Check if heap header was modified */
- if(hdr_changed)
- if(H5HF_hdr_dirty(hdr) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTDIRTY, FAIL, "can't mark heap header as dirty")
- } /* end if */
- else {
- hbool_t par_changed = FALSE; /* Whether the parent's information changed */
- H5HF_indirect_t *par_iblock; /* Parent indirect block */
- unsigned par_entry; /* Entry in parent indirect block */
+ /* Offset of block in heap */
+ UINT64ENCODE_VAR(image, dblock->block_off, hdr->heap_off_size);
- /* Get parent information */
- par_iblock = dblock->parent;
- par_entry = dblock->par_entry;
+ /* Metadata checksum */
+ if(hdr->checksum_dblocks) {
+ uint32_t metadata_chksum; /* Computed metadata checksum value */
- /* Sanity check */
- HDassert(H5F_addr_eq(par_iblock->ents[par_entry].addr, addr));
- HDassert(par_iblock->filt_ents[par_entry].size > 0);
+ /* Clear the checksum field, to compute the checksum */
+ HDmemset(image, 0, (size_t)H5HF_SIZEOF_CHKSUM);
- /* Check if the filter mask changed */
- if(par_iblock->filt_ents[par_entry].filter_mask != filter_mask) {
- par_iblock->filt_ents[par_entry].filter_mask = filter_mask;
- par_changed = TRUE;
- } /* end if */
+ /* Compute checksum on entire direct block */
+ metadata_chksum = H5_checksum_metadata(dblock->blk, dblock->size, 0);
- /* Check if we need to re-size the block on disk */
- if(par_iblock->filt_ents[par_entry].size != write_size || at_tmp_addr) {
- /* Check if the direct block is NOT currently allocated in temp. file space */
- /* (temp. file space does not need to be freed) */
- if(!at_tmp_addr) {
- /* Release direct block's current disk space */
- if(H5MF_xfree(f, H5FD_MEM_FHEAP_DBLOCK, dxpl_id, addr, (hsize_t)par_iblock->filt_ents[par_entry].size) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to free fractal heap direct block")
- } /* end if */
+ /* Metadata checksum */
+ UINT32ENCODE(image, metadata_chksum);
+ } /* end if */
- /* Allocate space for the compressed direct block */
- if(HADDR_UNDEF == (addr = H5MF_alloc(f, H5FD_MEM_FHEAP_DBLOCK, dxpl_id, (hsize_t)write_size)))
- HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "file allocation failed for fractal heap direct block")
+ /* at this point, dblock->blk should point to an uncompressed image of
+ * the direct block. If I/O filters are not enabled, this image should
+ * be ready to hand off to the metadata cache.
+ */
- /* Let the metadata cache know, if the block moved */
- if(!H5F_addr_eq(par_iblock->ents[par_entry].addr, addr))
- if(H5AC_move_entry(f, H5AC_FHEAP_DBLOCK, par_iblock->ents[par_entry].addr, addr) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTMOVE, FAIL, "unable to move direct block")
+ /* Sanity check */
+ HDassert((size_t)(image - dblock->blk) == (size_t)H5HF_MAN_ABS_DIRECT_OVERHEAD(hdr));
- /* Update information about compressed direct block's location & size */
- par_iblock->ents[par_entry].addr = addr;
- par_iblock->filt_ents[par_entry].size = write_size;
+ /* If I/O filters are enabled on this heap, we must run the direct block
+ * image through the filters to obtain the image that we will hand off
+ * to the metadata cache.
+ */
- /* Note that parent was modified */
- par_changed = TRUE;
- } /* end if */
+ /* Check for I/O filters on this heap */
+ if(hdr->filter_len > 0) {
+ H5Z_cb_t filter_cb = {NULL, NULL}; /* Filter callback structure */
+ size_t nbytes; /* Number of bytes used */
+ unsigned filter_mask = 0; /* Filter mask for block */
- /* Check if parent was modified */
- if(par_changed)
- if(H5HF_iblock_dirty(par_iblock) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTDIRTY, FAIL, "can't mark heap header as dirty")
- } /* end else */
- } /* end if */
- else {
- write_buf = dblock->blk;
- write_size = dblock->size;
+ /* Allocate buffer to perform I/O filtering on */
+ write_size = dblock->size;
- /* Check for needing to re-allocate direct block from 'temp.' to 'normal' file space */
- if(at_tmp_addr) {
- /* Check for root direct block */
- if(NULL == dblock->parent) {
- /* Sanity check */
- HDassert(H5F_addr_eq(hdr->man_dtable.table_addr, addr));
+ if(NULL == (write_buf = H5MM_malloc(write_size)))
+ HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "memory allocation failed for pipeline buffer")
+ HDmemcpy(write_buf, dblock->blk, write_size);
- /* Allocate 'normal' space for the direct block */
- if(HADDR_UNDEF == (addr = H5MF_alloc(f, H5FD_MEM_FHEAP_DBLOCK, dxpl_id, (hsize_t)write_size)))
- HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "file allocation failed for fractal heap direct block")
+ /* Push direct block data through I/O filter pipeline */
+ nbytes = write_size;
+ if(H5Z_pipeline(&(hdr->pline), 0, &filter_mask, H5Z_ENABLE_EDC, filter_cb, &nbytes, &write_size, &write_buf) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_WRITEERROR, FAIL, "output pipeline failed")
+
+ /* Use the compressed number of bytes as the size to write */
+ write_size = nbytes;
+
+ /* If the size and/or location of the on disk image of the
+ * direct block changes, we must touch up its parent to reflect
+ * these changes. Do this differently depending on whether the
+ * direct block's parent is an indirect block or (rarely) the
+ * fractal heap header. In the latter case, the direct block is known
+ * as a root direct block.
+ */
- /* Sanity check */
- HDassert(!H5F_addr_eq(hdr->man_dtable.table_addr, addr));
+ /* Check for root direct block */
+ if(dblock->parent == NULL) {
+ hbool_t hdr_changed = FALSE; /* Whether the header info changed */
- /* Let the metadata cache know the block moved */
- if(H5AC_move_entry(f, H5AC_FHEAP_DBLOCK, hdr->man_dtable.table_addr, addr) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTMOVE, FAIL, "unable to move direct block")
+ /* Sanity check */
+ HDassert(H5F_addr_eq(hdr->man_dtable.table_addr, addr));
+ HDassert(hdr->pline_root_direct_size > 0);
- /* Update information about direct block's location */
- hdr->man_dtable.table_addr = addr;
+ /* Check if the filter mask changed */
+ if(hdr->pline_root_direct_filter_mask != filter_mask) {
+ hdr->pline_root_direct_filter_mask = filter_mask;
+ hdr_changed = TRUE;
+ } /* end if */
+
+ /* verify that the cache's last record of the compressed
+ * size matches the heap's last record. This value will
+ * likely change shortly.
+ */
+ HDassert(compressed_len == hdr->pline_root_direct_size);
- /* Mark that heap header was modified */
- if(H5HF_hdr_dirty(hdr) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTDIRTY, FAIL, "can't mark heap header as dirty")
+ /* Check if we need to re-size the block on disk */
+ if(hdr->pline_root_direct_size != write_size || at_tmp_addr) {
+ /* Check if the direct block is NOT currently allocated
+ * in temp. file space
+ *
+ * (temp. file space does not need to be freed)
+ */
+ if(!at_tmp_addr) {
+ /* Release direct block's current disk space */
+ if(H5MF_xfree(f, H5FD_MEM_FHEAP_DBLOCK, dxpl_id, addr, (hsize_t)hdr->pline_root_direct_size) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to free fractal heap direct block")
} /* end if */
- else {
- H5HF_indirect_t *par_iblock; /* Parent indirect block */
- unsigned par_entry; /* Entry in parent indirect block */
- /* Get parent information */
- par_iblock = dblock->parent;
- par_entry = dblock->par_entry;
+ /* Allocate space for the compressed direct block */
+ if(HADDR_UNDEF == (dblock_addr = H5MF_alloc((H5F_t *)f, H5FD_MEM_FHEAP_DBLOCK, dxpl_id, (hsize_t)write_size)))
+ HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "file allocation failed for fractal heap direct block")
- /* Sanity check */
- HDassert(H5F_addr_eq(par_iblock->ents[par_entry].addr, addr));
+ /* Let the metadata cache know, if the block moved */
+ if(!H5F_addr_eq(hdr->man_dtable.table_addr, dblock_addr))
+ if(H5AC_move_entry((H5F_t *)f, H5AC_FHEAP_DBLOCK, hdr->man_dtable.table_addr, dblock_addr) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTMOVE, FAIL, "unable to move direct block")
- /* Allocate 'normal' space for the direct block */
- if(HADDR_UNDEF == (addr = H5MF_alloc(f, H5FD_MEM_FHEAP_DBLOCK, dxpl_id, (hsize_t)write_size)))
- HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "file allocation failed for fractal heap direct block")
+ /* Update information about compressed direct block's
+ * location & size
+ */
+ HDassert(hdr->man_dtable.table_addr == addr);
+ HDassert(hdr->pline_root_direct_size == compressed_len);
+ hdr->man_dtable.table_addr = dblock_addr;
+ hdr->pline_root_direct_size = write_size;
- /* Sanity check */
- HDassert(!H5F_addr_eq(par_iblock->ents[par_entry].addr, addr));
+ /* Note that heap header was modified */
+ hdr_changed = TRUE;
+ } /* end if */
+
+ /* Check if heap header was modified */
+ if(hdr_changed)
+ if(H5HF_hdr_dirty(hdr) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDIRTY, FAIL, "can't mark heap header as dirty")
+ } /* end if */
+ else { /* the direct block's parent is an indirect block */
+ hbool_t par_changed = FALSE; /* Whether the parent's info changed */
- /* Let the metadata cache know the block moved */
- if(H5AC_move_entry(f, H5AC_FHEAP_DBLOCK, par_iblock->ents[par_entry].addr, addr) < 0)
+ /* Sanity check */
+ HDassert(par_iblock);
+ HDassert(par_iblock->filt_ents[par_entry].size > 0);
+
+ /* Check if the filter mask changed */
+ if(par_iblock->filt_ents[par_entry].filter_mask != filter_mask) {
+ par_iblock->filt_ents[par_entry].filter_mask = filter_mask;
+ par_changed = TRUE;
+ } /* end if */
+
+ /* verify that the cache's last record of the compressed
+ * size matches the heap's last record. This value will
+ * likely change shortly.
+ */
+ HDassert(compressed_len == par_iblock->filt_ents[par_entry].size);
+
+ /* Check if we need to re-size the block on disk */
+ if(par_iblock->filt_ents[par_entry].size != write_size || at_tmp_addr) {
+ /* Check if the direct block is NOT currently allocated
+ * in temp. file space
+ *
+ * (temp. file space does not need to be freed)
+ */
+ if(!at_tmp_addr) {
+ /* Release direct block's current disk space */
+ if(H5MF_xfree(f, H5FD_MEM_FHEAP_DBLOCK, dxpl_id, addr, (hsize_t)par_iblock->filt_ents[par_entry].size) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to free fractal heap direct block")
+ } /* end if */
+
+ /* Allocate space for the compressed direct block */
+ if(HADDR_UNDEF == (dblock_addr = H5MF_alloc((H5F_t *)f, H5FD_MEM_FHEAP_DBLOCK, dxpl_id, (hsize_t)write_size)))
+ HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "file allocation failed for fractal heap direct block")
+
+ /* Let the metadata cache know, if the block moved */
+ if(!H5F_addr_eq(par_iblock->ents[par_entry].addr, dblock_addr))
+ if(H5AC_move_entry((H5F_t *)f, H5AC_FHEAP_DBLOCK, par_iblock->ents[par_entry].addr, dblock_addr) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTMOVE, FAIL, "unable to move direct block")
- /* Update information about direct block's location */
- par_iblock->ents[par_entry].addr = addr;
+ /* Update information about compressed direct block's
+ * location & size
+ */
+ HDassert(par_iblock->ents[par_entry].addr == addr);
+ HDassert(par_iblock->filt_ents[par_entry].size == compressed_len);
+ par_iblock->ents[par_entry].addr = dblock_addr;
+ par_iblock->filt_ents[par_entry].size = write_size;
- /* Mark that parent was modified */
- if(H5HF_iblock_dirty(par_iblock) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTDIRTY, FAIL, "can't mark heap header as dirty")
- } /* end else */
+ /* Note that parent was modified */
+ par_changed = TRUE;
} /* end if */
+
+ /* Check if parent was modified */
+ if(par_changed)
+ if(H5HF_iblock_dirty(par_iblock) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDIRTY, FAIL, "can't mark heap header as dirty")
} /* end else */
+ } /* end if */
+ else {
+ /* I/O filters are not enabled -- thus all we need to do is check to
+ * see if the direct block is in temporary (AKA imaginary) file
+ * space, and move it to real file space if it is.
+ *
+ * As in the I/O filters case above, we will have to touch up the
+ * direct block's parent if the direct block is relocated.
+ *
+ * Recall that temporary file space need not be freed, which
+ * simplifies matters slightly.
+ */
+ write_buf = dblock->blk;
+ write_size = dblock->size;
- /* Direct block must be in 'normal' file space now */
- HDassert(!H5F_IS_TMP_ADDR(f, addr));
+ /* Check to see if we must re-allocate direct block from 'temp.'
+ * to 'normal' file space
+ */
+ if(at_tmp_addr) {
+ /* Check for root direct block */
+ if(NULL == dblock->parent) {
+ /* Sanity check */
+ HDassert(H5F_addr_eq(hdr->man_dtable.table_addr, addr));
- /* Write the direct block */
- if(H5F_block_write(f, H5FD_MEM_FHEAP_DBLOCK, addr, write_size, dxpl_id, write_buf) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFLUSH, FAIL, "unable to save fractal heap direct block to disk")
+ /* Allocate 'normal' space for the direct block */
+ if(HADDR_UNDEF == (dblock_addr = H5MF_alloc((H5F_t *)f, H5FD_MEM_FHEAP_DBLOCK, dxpl_id, (hsize_t)write_size)))
+ HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "file allocation failed for fractal heap direct block")
- /* Release the write buffer, if it was allocated */
- if(write_buf != dblock->blk)
- H5MM_xfree(write_buf);
+ /* Sanity check */
+ HDassert(!H5F_addr_eq(hdr->man_dtable.table_addr, dblock_addr));
- dblock->cache_info.is_dirty = FALSE;
- } /* end if */
+ /* Let the metadata cache know the block moved */
+ if(H5AC_move_entry((H5F_t *)f, H5AC_FHEAP_DBLOCK, hdr->man_dtable.table_addr, dblock_addr) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTMOVE, FAIL, "unable to move direct block")
- if(destroy)
- if(H5HF_cache_dblock_dest(f, dblock) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to destroy fractal heap direct block")
+ /* Update information about direct block's location */
+ hdr->man_dtable.table_addr = dblock_addr;
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5HF_cache_dblock_flush() */
+ /* Mark that heap header was modified */
+ if(H5HF_hdr_dirty(hdr) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDIRTY, FAIL, "can't mark heap header as dirty")
+ } /* end if */
+ else { /* the direct block's parent is an indirect block */
+ /* Sanity check */
+ HDassert(par_iblock);
+ HDassert(H5F_addr_eq(par_iblock->ents[par_entry].addr, addr));
-
-/*-------------------------------------------------------------------------
- * Function: H5HF_cache_dblock_dest
- *
- * Purpose: Destroys a fractal heap direct block in memory.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Feb 27 2006
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5HF_cache_dblock_dest(H5F_t *f, H5HF_direct_t *dblock)
-{
- herr_t ret_value = SUCCEED; /* Return value */
+ /* Allocate 'normal' space for the direct block */
+ if(HADDR_UNDEF == (dblock_addr = H5MF_alloc((H5F_t *)f, H5FD_MEM_FHEAP_DBLOCK, dxpl_id, (hsize_t)write_size)))
+ HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "file allocation failed for fractal heap direct block")
- FUNC_ENTER_NOAPI_NOINIT
+ /* Sanity check */
+ HDassert(!H5F_addr_eq(par_iblock->ents[par_entry].addr, dblock_addr));
- /*
- * Check arguments.
- */
- HDassert(dblock);
+ /* Let the metadata cache know the block moved */
+ if(H5AC_move_entry((H5F_t *)f, H5AC_FHEAP_DBLOCK, par_iblock->ents[par_entry].addr, dblock_addr) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTMOVE, FAIL, "unable to move direct block")
- /* If we're going to free the space on disk, the address must be valid */
- HDassert(!dblock->cache_info.free_file_space_on_destroy || H5F_addr_defined(dblock->cache_info.addr));
+ /* Update information about direct block's location */
+ par_iblock->ents[par_entry].addr = dblock_addr;
- /* Check for freeing file space for direct block */
- if(dblock->cache_info.free_file_space_on_destroy) {
- /* Sanity check */
- HDassert(dblock->file_size > 0);
-
- /* Check if the direct block is NOT currently allocated in temp. file space */
- /* (temp. file space does not need to be freed) */
- if(!H5F_IS_TMP_ADDR(f, dblock->cache_info.addr)) {
- /* Release the space on disk */
- /* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_FHEAP_DBLOCK, H5AC_dxpl_id, dblock->cache_info.addr, dblock->file_size) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to free fractal heap direct block")
+ /* Mark that parent was modified */
+ if(H5HF_iblock_dirty(par_iblock) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDIRTY, FAIL, "can't mark heap header as dirty")
+ } /* end else */
} /* end if */
+ } /* end else */
+
+ /* At this point, write_buf points to a buffer containing the image
+ * of the direct block that is ready to copy into the image buffer,
+ * and write_size contains the length of this buffer.
+ *
+ * Also, if image size or address has changed, the direct block's
+ * parent has been modified to reflect the change.
+ *
+ * Now, make note of the pointer and length of the above buffer for
+ * use by the serialize function.
+ */
+ dblock->write_buf = (uint8_t *)write_buf;
+ dblock->write_size = write_size;
+
+ /* finally, pass data back to the metadata cache as appropriate */
+ if(!H5F_addr_eq(addr, dblock_addr)) {
+ dblock_flags |= H5C__SERIALIZE_MOVED_FLAG;
+ *new_addr = dblock_addr;
} /* end if */
- /* Destroy fractal heap direct block */
- if(H5HF_man_dblock_dest(dblock) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to destroy fractal heap direct block")
+ if((hdr->filter_len > 0) && (compressed_len != write_size)) {
+ dblock_flags |= H5C__SERIALIZE_COMPRESSED_FLAG;
+ *new_compressed_len = write_size;
+ } /* end if */
+
+ *flags = dblock_flags;
+
+ /* final sanity check */
+ HDassert(dblock->write_buf);
+ HDassert(dblock->write_size > 0);
done:
+ /* discard the write buf if we have an error */
+ if(write_buf && (write_buf != dblock->blk) && (dblock->write_buf == NULL))
+ H5MM_xfree(write_buf);
+
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5HF_cache_dblock_dest() */
+} /* end H5HF__cache_dblock_pre_serialize() */
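
/*
 * Illustrative sketch (hedged, not part of the patch above): the contract
 * spelled out in the pre-serialize header comment is that pre-serialize
 * leaves either a separately allocated (filtered) buffer or an alias of
 * blk in write_buf, and the serialize callback copies it into the
 * cache-provided image, frees it only when it was allocated, and resets
 * write_buf/write_size to NULL/0.  The standalone pair below demonstrates
 * that ownership hand-off with plain C; entry_t and the "filtered" flag
 * are hypothetical, not HDF5 types.
 */
#include <assert.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    unsigned char  blk[32];   /* uncompressed in-core image          */
    unsigned char *write_buf; /* staged image; NULL outside a flush  */
    size_t         write_size;
} entry_t;

/* Stage the image: allocate a private buffer when "filtering", else
 * just alias blk (cf. the two branches of the pre-serialize callback).
 */
static int stage(entry_t *e, int filtered)
{
    assert(e->write_buf == NULL && e->write_size == 0);
    if(filtered) {
        e->write_buf = malloc(sizeof(e->blk));
        if(!e->write_buf)
            return -1;
        memcpy(e->write_buf, e->blk, sizeof(e->blk));
    }
    else
        e->write_buf = e->blk;
    e->write_size = sizeof(e->blk);
    return 0;
}

/* Copy the staged image out, free it only if it was allocated, and
 * reset the staging fields so the next flush starts clean.
 */
static void emit(entry_t *e, unsigned char *image)
{
    memcpy(image, e->write_buf, e->write_size);
    if(e->write_buf != e->blk)
        free(e->write_buf);
    e->write_buf = NULL;
    e->write_size = 0;
}

int main(void)
{
    entry_t e = { "direct block payload", NULL, 0 };
    unsigned char image[32];

    assert(stage(&e, 1) == 0);   /* filtered path: private buffer */
    emit(&e, image);
    assert(e.write_buf == NULL && e.write_size == 0);
    assert(memcmp(image, e.blk, sizeof(e.blk)) == 0);
    return 0;
}
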
/*-------------------------------------------------------------------------
- * Function: H5HF_cache_dblock_clear
+ * Function: H5HF__cache_dblock_serialize
*
- * Purpose: Mark a fractal heap direct block in memory as non-dirty.
+ * Purpose: In principle, this function is supposed to construct the on
+ * disk image of the direct block, and place that image in the
+ * image buffer provided by the metadata cache.
*
- * Return: Non-negative on success/Negative on failure
+ * However, since there are cases in which the pre_serialize
+ * function has to construct the on disk image to determine its size
+ * and address, this function simply copies the image prepared by
+ * the pre-serialize function into the supplied image buffer, and
+ * discards the write buffer if necessary.
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Feb 27 2006
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5HF_cache_dblock_clear(H5F_t *f, H5HF_direct_t *dblock, hbool_t destroy)
+static herr_t
+H5HF__cache_dblock_serialize(const H5F_t *f, void *image, size_t len,
+ void *_thing)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ H5HF_direct_t *dblock = (H5HF_direct_t *)_thing; /* Direct block info */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC_NOERR
- /*
- * Check arguments.
- */
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(image);
+ HDassert(len > 0);
HDassert(dblock);
+ HDassert(dblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(dblock->cache_info.type == H5AC_FHEAP_DBLOCK);
+ HDassert((dblock->blk != dblock->write_buf) || (dblock->cache_info.size == dblock->size));
+ HDassert(dblock->write_buf);
+ HDassert(dblock->write_size > 0);
+ HDassert((dblock->blk != dblock->write_buf) || (dblock->write_size == dblock->size));
+ HDassert(dblock->write_size == len);
+
+ /* Copy the image from *(dblock->write_buf) to *image */
+ HDmemcpy(image, dblock->write_buf, dblock->write_size);
+
+ /* Free *(dblock->write_buf) if it was allocated by the
+ * pre-serialize function
+ */
+ if(dblock->write_buf != dblock->blk)
+ H5MM_xfree(dblock->write_buf);
- /* Reset the dirty flag. */
- dblock->cache_info.is_dirty = FALSE;
-
- if(destroy)
- if(H5HF_cache_dblock_dest(f, dblock) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to destroy fractal heap direct block")
+ /* Reset the write_buf and write_size fields */
+ dblock->write_buf = NULL;
+ dblock->write_size = 0;
-done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5HF_cache_dblock_clear() */
+} /* end H5HF__cache_dblock_serialize() */
/*-------------------------------------------------------------------------
- * Function: H5HF_cache_dblock_notify
+ * Function: H5HF__cache_dblock_notify
*
* Purpose: Setup / takedown flush dependencies as direct blocks
* are loaded / inserted and evicted from the metadata cache.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
* Programmer: John Mainzer
- * 5/17/14
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5HF_cache_dblock_notify(H5C_notify_action_t action, H5HF_direct_t *dblock)
+static herr_t
+H5HF__cache_dblock_notify(H5C_notify_action_t action, void *_thing)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ H5HF_direct_t *dblock = (H5HF_direct_t *)_thing; /* Fractal heap direct block */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
- /*
- * Check arguments.
- */
+ /* Sanity checks */
HDassert(dblock);
HDassert(dblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(dblock->cache_info.type == H5AC_FHEAP_DBLOCK);
HDassert(dblock->hdr);
HDassert((dblock->fd_parent) ||
((dblock->hdr->man_dtable.curr_root_rows == 0) && (dblock->block_off == (hsize_t)0)));
switch(action) {
case H5AC_NOTIFY_ACTION_AFTER_INSERT:
+ case H5AC_NOTIFY_ACTION_AFTER_LOAD:
HDassert(dblock->parent == dblock->fd_parent);
if(dblock->parent) { /* this is a leaf dblock */
/* create flush dependency with parent iblock */
@@ -1964,6 +2392,10 @@ H5HF_cache_dblock_notify(H5C_notify_action_t action, H5HF_direct_t *dblock)
} /* end else */
break;
+ case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
+ /* do nothing */
+ break;
+
case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
HDassert((dblock->parent == dblock->fd_parent) ||
((NULL == dblock->parent) && (dblock->fd_parent)));
@@ -1986,39 +2418,47 @@ H5HF_cache_dblock_notify(H5C_notify_action_t action, H5HF_direct_t *dblock)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5HF_cache_dblock_notify() */
+} /* end H5HF__cache_dblock_notify() */
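As a rough sketch of the notify-action dispatch used by the direct block client above (the toy_notify() function, the toy_notify_action_t enum, and the callback parameters are invented for illustration and are not part of the HDF5 source; the real callback also keeps the parent pointer and flush-dependency bookkeeping consistent):

/* Possible notifications, mirroring the actions handled above */
typedef enum {
    TOY_NOTIFY_AFTER_INSERT,
    TOY_NOTIFY_AFTER_LOAD,
    TOY_NOTIFY_AFTER_FLUSH,
    TOY_NOTIFY_BEFORE_EVICT
} toy_notify_action_t;

/* Set up a flush dependency when the entry enters the cache, tear it
 * down before eviction, and ignore flush notifications. */
static int
toy_notify(toy_notify_action_t action, void *entry, void *parent,
    int (*create_dep)(void *, void *), int (*destroy_dep)(void *, void *))
{
    switch(action) {
        case TOY_NOTIFY_AFTER_INSERT:
        case TOY_NOTIFY_AFTER_LOAD:
            return parent ? create_dep(parent, entry) : 0;

        case TOY_NOTIFY_AFTER_FLUSH:
            return 0;   /* nothing to do */

        case TOY_NOTIFY_BEFORE_EVICT:
            return parent ? destroy_dep(parent, entry) : 0;

        default:
            return -1;  /* unknown action */
    } /* end switch */
}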
/*-------------------------------------------------------------------------
- * Function: H5HF_cache_dblock_size
+ * Function: H5HF__cache_dblock_free_icr
*
- * Purpose: Compute the size in bytes of a fractal heap direct block
- * on disk, and return it in *size_ptr. On failure,
- * the value of *size_ptr is undefined.
+ * Purpose: Free the in core memory allocated to the supplied direct
+ * block.
*
- * Return: Non-negative on success/Negative on failure
+ * Note: The metadata cache sets the object's cache_info.magic to
+ * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr
+ * callback (checked in assert).
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Feb 24 2006
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5HF_cache_dblock_size(const H5F_t H5_ATTR_UNUSED *f, const H5HF_direct_t *dblock, size_t *size_ptr)
+static herr_t
+H5HF__cache_dblock_free_icr(void *_thing)
{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ H5HF_direct_t *dblock = (H5HF_direct_t *)_thing; /* Fractal heap direct block */
+ herr_t ret_value = SUCCEED; /* Return value */
- /* check arguments */
- HDassert(dblock);
- HDassert(size_ptr);
+ FUNC_ENTER_STATIC
- /* Set size value */
- *size_ptr = dblock->size;
+ /* Sanity checks */
+ HDassert(dblock);
+ HDassert(dblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC);
+ HDassert(dblock->cache_info.type == H5AC_FHEAP_DBLOCK);
- FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5HF_cache_dblock_size() */
+ /* Destroy fractal heap direct block */
+ if(H5HF_man_dblock_dest(dblock) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to destroy fractal heap direct block")
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5HF__cache_dblock_free_icr() */
/*------------------------------------------------------------------------
@@ -2144,7 +2584,7 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, hid_t dxpl_id,
* in this case, since we know that the entry is in cache,
* we can pass NULL udata.
*/
- if(NULL == (root_iblock = (H5HF_indirect_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_IBLOCK, root_iblock_addr, NULL, H5AC_READ)))
+ if(NULL == (root_iblock = (H5HF_indirect_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_IBLOCK, root_iblock_addr, NULL, H5C__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() faild.")
unprotect_root_iblock = TRUE;
} /* end if */
@@ -2205,7 +2645,7 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, hid_t dxpl_id,
* in this case, since we know that the entry is in cache,
* we can pass NULL udata.
*/
- if(NULL == (iblock = (H5HF_indirect_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_IBLOCK, root_iblock_addr, NULL, H5AC_READ)))
+ if(NULL == (iblock = (H5HF_indirect_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_IBLOCK, root_iblock_addr, NULL, H5C__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() faild.")
unprotect_root_iblock = TRUE;
HDassert(iblock == root_iblock);
@@ -2595,7 +3035,7 @@ H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, hid_t dxpl_id,
/* in this case, since we know that the */
/* entry is in cache, we can pass NULL udata */
- if(NULL == (child_iblock = (H5HF_indirect_t *) H5AC_protect(f, dxpl_id, H5AC_FHEAP_IBLOCK, child_iblock_addr, NULL, H5AC_READ)))
+ if(NULL == (child_iblock = (H5HF_indirect_t *) H5AC_protect(f, dxpl_id, H5AC_FHEAP_IBLOCK, child_iblock_addr, NULL, H5C__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() faild.")
unprotect_child_iblock = TRUE;
} /* end if */
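Before moving on, a minimal sketch of the pre-serialize / serialize handoff that the new H5HFcache.c callbacks implement may help; the toy_entry_t struct and the toy_pre_serialize() / toy_serialize() functions below are invented names, and the filtered (compressed) path is reduced to a comment:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

typedef struct toy_entry_t {
    unsigned char *blk;        /* in-core block image */
    size_t         size;       /* size of blk */
    unsigned char *write_buf;  /* staged on-disk image, or NULL */
    size_t         write_size; /* size of the staged image */
} toy_entry_t;

/* Stage the on-disk image; with no I/O filters this is the block itself.
 * A filtered client would instead malloc a separate buffer here. */
static int
toy_pre_serialize(toy_entry_t *e)
{
    e->write_buf  = e->blk;
    e->write_size = e->size;
    return 0;
}

/* Copy the staged image into the cache-supplied buffer, discard any
 * buffer the pre-serialize step allocated, and reset the stage. */
static int
toy_serialize(toy_entry_t *e, void *image, size_t len)
{
    assert(e->write_buf != NULL && e->write_size == len);
    memcpy(image, e->write_buf, e->write_size);
    if(e->write_buf != e->blk)
        free(e->write_buf);
    e->write_buf  = NULL;
    e->write_size = 0;
    return 0;
}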
diff --git a/src/H5HFdbg.c b/src/H5HFdbg.c
index 8620f6f..5183b67 100644
--- a/src/H5HFdbg.c
+++ b/src/H5HFdbg.c
@@ -323,7 +323,7 @@ H5HF_hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
HDassert(fwidth >= 0);
/* Load the fractal heap header */
- if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, addr, H5AC_READ)))
+ if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap header")
/* Print the information about the heap's header */
@@ -459,13 +459,13 @@ H5HF_dblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream,
HDassert(block_size > 0);
/* Load the fractal heap header */
- if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, hdr_addr, H5AC_READ)))
+ if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, hdr_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap header")
/*
* Load the heap direct block
*/
- if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, addr, block_size, NULL, 0, H5AC_READ)))
+ if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, addr, block_size, NULL, 0, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load fractal heap direct block")
/* Print opening message */
@@ -716,13 +716,13 @@ H5HF_iblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream,
HDassert(nrows > 0);
/* Load the fractal heap header */
- if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, hdr_addr, H5AC_READ)))
+ if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, hdr_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap header")
/*
* Load the heap indirect block
*/
- if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, addr, nrows, NULL, 0, FALSE, H5AC_READ, &did_protect)))
+ if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, addr, nrows, NULL, 0, FALSE, H5AC__READ_ONLY_FLAG, &did_protect)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load fractal heap indirect block")
/* Print the information about the heap's indirect block */
@@ -825,7 +825,7 @@ H5HF_sects_debug(H5F_t *f, hid_t dxpl_id, haddr_t fh_addr,
HDassert(fwidth >= 0);
/* Load the fractal heap header */
- if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, fh_addr, H5AC_READ)))
+ if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, fh_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap header")
/* Initialize the free space information for the heap */
diff --git a/src/H5HFdblock.c b/src/H5HFdblock.c
index 9a8da4a..5dd5b0c 100644
--- a/src/H5HFdblock.c
+++ b/src/H5HFdblock.c
@@ -149,6 +149,9 @@ H5HF_man_dblock_create(hid_t dxpl_id, H5HF_hdr_t *hdr, H5HF_indirect_t *par_iblo
HDmemset(dblock->blk, 0, dblock->size);
#endif /* H5_CLEAR_MEMORY */
+ dblock->write_buf = NULL;
+ dblock->write_size = 0;
+
/* Allocate [temporary] space for the direct block on disk */
if(H5F_USE_TMP_SPACE(hdr->f)) {
if(HADDR_UNDEF == (dblock_addr = H5MF_alloc_tmp(hdr->f, (hsize_t)dblock->size)))
@@ -308,9 +311,13 @@ H5HF_man_dblock_destroy(H5HF_hdr_t *hdr, hid_t dxpl_id, H5HF_direct_t *dblock,
} /* end if */
} /* end else */
- /* Indicate that the indirect block should be deleted & file space freed */
+ /* Indicate that the indirect block should be deleted */
dblock->file_size = dblock_size;
- cache_flags |= H5AC__DIRTIED_FLAG | H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG;
+ cache_flags |= H5AC__DIRTIED_FLAG | H5AC__DELETED_FLAG;
+
+ /* If the dblock is in real file space, also tell the cache to free its file space */
+ if (!H5F_IS_TMP_ADDR(hdr->f, dblock_addr))
+ cache_flags |= H5AC__FREE_FILE_SPACE_FLAG;
done:
/* Unprotect the indirect block, with appropriate flags */
@@ -436,7 +443,7 @@ done:
H5HF_direct_t *
H5HF_man_dblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t dblock_addr,
size_t dblock_size, H5HF_indirect_t *par_iblock, unsigned par_entry,
- H5AC_protect_t rw)
+ unsigned flags)
{
H5HF_direct_t *dblock; /* Direct block from cache */
H5HF_dblock_cache_ud_t udata; /* parent and other info for deserializing direct block */
@@ -451,6 +458,9 @@ H5HF_man_dblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t dblock_addr,
HDassert(H5F_addr_defined(dblock_addr));
HDassert(dblock_size > 0);
+ /* only H5AC__READ_ONLY_FLAG may appear in flags */
+ HDassert((flags & (~H5AC__READ_ONLY_FLAG)) == 0);
+
/* Set up parent info */
udata.par_info.hdr = hdr;
udata.par_info.iblock = par_iblock;
@@ -485,7 +495,7 @@ H5HF_man_dblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t dblock_addr,
} /* end else */
/* Protect the direct block */
- if(NULL == (dblock = (H5HF_direct_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FHEAP_DBLOCK, dblock_addr, &udata, rw)))
+ if(NULL == (dblock = (H5HF_direct_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FHEAP_DBLOCK, dblock_addr, &udata, flags)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect fractal heap direct block")
/* Set the return value */
@@ -512,7 +522,7 @@ done:
herr_t
H5HF_man_dblock_locate(H5HF_hdr_t *hdr, hid_t dxpl_id, hsize_t obj_off,
H5HF_indirect_t **ret_iblock, unsigned *ret_entry, hbool_t *ret_did_protect,
- H5AC_protect_t rw)
+ unsigned flags)
{
haddr_t iblock_addr; /* Indirect block's address */
H5HF_indirect_t *iblock; /* Pointer to indirect block */
@@ -531,6 +541,9 @@ H5HF_man_dblock_locate(H5HF_hdr_t *hdr, hid_t dxpl_id, hsize_t obj_off,
HDassert(ret_iblock);
HDassert(ret_did_protect);
+ /* only H5AC__READ_ONLY_FLAG may appear in flags */
+ HDassert((flags & (~H5AC__READ_ONLY_FLAG)) == 0);
+
/* Look up row & column for object */
if(H5HF_dtable_lookup(&hdr->man_dtable, obj_off, &row, &col) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTCOMPUTE, FAIL, "can't compute row & column of object")
@@ -539,7 +552,7 @@ H5HF_man_dblock_locate(H5HF_hdr_t *hdr, hid_t dxpl_id, hsize_t obj_off,
iblock_addr = hdr->man_dtable.table_addr;
/* Lock root indirect block */
- if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, hdr->man_dtable.curr_root_rows, NULL, 0, FALSE, rw, &did_protect)))
+ if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, hdr->man_dtable.curr_root_rows, NULL, 0, FALSE, flags, &did_protect)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap indirect block")
/* Check for indirect block row */
@@ -569,7 +582,7 @@ H5HF_man_dblock_locate(H5HF_hdr_t *hdr, hid_t dxpl_id, hsize_t obj_off,
} /* end if */
/* Lock child indirect block */
- if(NULL == (new_iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, nrows, iblock, entry, FALSE, rw, &new_did_protect)))
+ if(NULL == (new_iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, nrows, iblock, entry, FALSE, flags, &new_did_protect)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap indirect block")
/* Release the current indirect block */
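The conditional H5AC__FREE_FILE_SPACE_FLAG logic introduced above boils down to a small decision; a hedged sketch with invented names (toy_delete_flags(), TOY_* constants) rather than the real H5AC macros:

#include <stdbool.h>

#define TOY_DIRTIED_FLAG          0x1u
#define TOY_DELETED_FLAG          0x2u
#define TOY_FREE_FILE_SPACE_FLAG  0x4u

/* Build unprotect/expunge flags for a block being destroyed: always mark
 * it dirty and deleted, but only ask the cache to free file space when
 * the block lives at a real (non-temporary) file address. */
static unsigned
toy_delete_flags(bool is_tmp_addr)
{
    unsigned flags = TOY_DIRTIED_FLAG | TOY_DELETED_FLAG;

    if(!is_tmp_addr)
        flags |= TOY_FREE_FILE_SPACE_FLAG;

    return flags;
}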
diff --git a/src/H5HFhdr.c b/src/H5HFhdr.c
index cf8da23..340940f 100644
--- a/src/H5HFhdr.c
+++ b/src/H5HFhdr.c
@@ -529,7 +529,7 @@ done:
*-------------------------------------------------------------------------
*/
H5HF_hdr_t *
-H5HF_hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw)
+H5HF_hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, unsigned flags)
{
H5HF_hdr_cache_ud_t cache_udata; /* User-data for callback */
H5HF_hdr_t *hdr; /* Fractal heap header */
@@ -541,12 +541,15 @@ H5HF_hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw)
HDassert(f);
HDassert(H5F_addr_defined(addr));
+ /* only H5AC__READ_ONLY_FLAG may appear in flags */
+ HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
+
/* Set up userdata for protect call */
cache_udata.f = f;
cache_udata.dxpl_id = dxpl_id;
/* Lock the heap header into memory */
- if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, addr, &cache_udata, rw)))
+ if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, addr, &cache_udata, flags)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect fractal heap header")
/* Set the header's address */
@@ -1109,7 +1112,7 @@ H5HF_hdr_update_iter(H5HF_hdr_t *hdr, hid_t dxpl_id, size_t min_dblock_size)
HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, FAIL, "can't allocate fractal heap indirect block")
/* Lock new indirect block */
- if(NULL == (new_iblock = H5HF_man_iblock_protect(hdr, dxpl_id, new_iblock_addr, child_nrows, iblock, next_entry, FALSE, H5AC_WRITE, &did_protect)))
+ if(NULL == (new_iblock = H5HF_man_iblock_protect(hdr, dxpl_id, new_iblock_addr, child_nrows, iblock, next_entry, FALSE, H5AC__NO_FLAGS_SET, &did_protect)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap indirect block")
/* Move iterator down one level (pins indirect block) */
@@ -1303,7 +1306,7 @@ H5HF_hdr_reverse_iter(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t dblock_addr)
child_nrows = H5HF_dtable_size_to_rows(&hdr->man_dtable, hdr->man_dtable.row_block_size[row]);
/* Lock child indirect block */
- if(NULL == (child_iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock->ents[curr_entry].addr, child_nrows, iblock, curr_entry, FALSE, H5AC_WRITE, &did_protect)))
+ if(NULL == (child_iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock->ents[curr_entry].addr, child_nrows, iblock, curr_entry, FALSE, H5AC__NO_FLAGS_SET, &did_protect)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap indirect block")
/* Set the current location of the iterator */
diff --git a/src/H5HFiblock.c b/src/H5HFiblock.c
index 4473803..547aaf0 100644
--- a/src/H5HFiblock.c
+++ b/src/H5HFiblock.c
@@ -330,8 +330,15 @@ H5HF_iblock_decr(H5HF_indirect_t *iblock)
/* Check for expunging the indirect block from the metadata cache */
if(expunge_iblock) {
- /* Evict the indirect block from the metadata cache */
- if(H5AC_expunge_entry(hdr->f, H5AC_dxpl_id, H5AC_FHEAP_IBLOCK, iblock_addr, H5AC__FREE_FILE_SPACE_FLAG) < 0)
+ unsigned cache_flags = H5AC__NO_FLAGS_SET;
+
+ /* if the indirect block is in real file space, tell
+ * the cache to free its file space.
+ */
+ if (!H5F_IS_TMP_ADDR(hdr->f, iblock_addr))
+ cache_flags |= H5AC__FREE_FILE_SPACE_FLAG;
+
+ if(H5AC_expunge_entry(hdr->f, H5AC_dxpl_id, H5AC_FHEAP_IBLOCK, iblock_addr, cache_flags) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTREMOVE, FAIL, "unable to remove indirect block from cache")
} /* end if */
} /* end if */
@@ -424,7 +431,7 @@ H5HF_man_iblock_root_create(H5HF_hdr_t *hdr, hid_t dxpl_id, size_t min_dblock_si
/* Move current direct block (used as root) into new indirect block */
/* Lock new indirect block */
- if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, nrows, NULL, 0, FALSE, H5AC_WRITE, &did_protect)))
+ if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, nrows, NULL, 0, FALSE, H5AC__NO_FLAGS_SET, &did_protect)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap indirect block")
/* Check if there's already a direct block as root) */
@@ -433,7 +440,7 @@ H5HF_man_iblock_root_create(H5HF_hdr_t *hdr, hid_t dxpl_id, size_t min_dblock_si
H5HF_direct_t *dblock; /* Pointer to direct block to query */
/* Lock first (root) direct block */
- if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, hdr->man_dtable.table_addr, hdr->man_dtable.cparam.start_block_size, NULL, 0, H5AC_WRITE)))
+ if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, hdr->man_dtable.table_addr, hdr->man_dtable.cparam.start_block_size, NULL, 0, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap direct block")
/* Attach direct block to new root indirect block */
@@ -879,7 +886,7 @@ H5HF_man_iblock_root_revert(H5HF_indirect_t *root_iblock, hid_t dxpl_id)
dblock_size = hdr->man_dtable.cparam.start_block_size;
/* Get pointer to last direct block */
- if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, dblock_addr, dblock_size, root_iblock, 0, H5AC_WRITE)))
+ if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, dblock_addr, dblock_size, root_iblock, 0, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap direct block")
HDassert(dblock->parent == root_iblock);
HDassert(dblock->par_entry == 0);
@@ -1159,7 +1166,7 @@ done:
H5HF_indirect_t *
H5HF_man_iblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t iblock_addr,
unsigned iblock_nrows, H5HF_indirect_t *par_iblock, unsigned par_entry,
- hbool_t must_protect, H5AC_protect_t rw, hbool_t *did_protect)
+ hbool_t must_protect, unsigned flags, hbool_t *did_protect)
{
H5HF_parent_t par_info; /* Parent info for loading block */
H5HF_indirect_t *iblock = NULL; /* Indirect block from cache */
@@ -1176,6 +1183,9 @@ H5HF_man_iblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t iblock_addr,
HDassert(iblock_nrows > 0);
HDassert(did_protect);
+ /* only H5AC__READ_ONLY_FLAG may appear in flags */
+ HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
+
/* Check if we are allowed to use existing pinned iblock pointer */
if(!must_protect) {
/* Check for this block already being pinned */
@@ -1235,7 +1245,7 @@ H5HF_man_iblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t iblock_addr,
cache_udata.nrows = &iblock_nrows;
/* Protect the indirect block */
- if(NULL == (iblock = (H5HF_indirect_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FHEAP_IBLOCK, iblock_addr, &cache_udata, rw)))
+ if(NULL == (iblock = (H5HF_indirect_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FHEAP_IBLOCK, iblock_addr, &cache_udata, flags)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect fractal heap indirect block")
/* Set the indirect block's address */
@@ -1579,7 +1589,7 @@ H5HF_man_iblock_delete(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t iblock_addr,
HDassert(iblock_nrows > 0);
/* Lock indirect block */
- if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, iblock_nrows, par_iblock, par_entry, TRUE, H5AC_WRITE, &did_protect)))
+ if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, iblock_nrows, par_iblock, par_entry, TRUE, H5AC__NO_FLAGS_SET, &did_protect)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap indirect block")
HDassert(iblock->nchildren > 0);
HDassert(did_protect == TRUE);
@@ -1637,8 +1647,14 @@ H5HF_man_iblock_delete(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t iblock_addr,
}
#endif /* NDEBUG */
- /* Indicate that the indirect block should be deleted & file space freed */
- cache_flags |= H5AC__DIRTIED_FLAG | H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG;
+ /* Indicate that the indirect block should be deleted */
+ cache_flags |= H5AC__DIRTIED_FLAG | H5AC__DELETED_FLAG;
+
+ /* If the indirect block is in real file space, tell
+ * the cache to free its file space as well.
+ */
+ if (!H5F_IS_TMP_ADDR(hdr->f, iblock_addr))
+ cache_flags |= H5AC__FREE_FILE_SPACE_FLAG;
done:
/* Unprotect the indirect block, with appropriate flags */
@@ -1680,7 +1696,7 @@ H5HF_man_iblock_size(H5F_t *f, hid_t dxpl_id, H5HF_hdr_t *hdr, haddr_t iblock_ad
HDassert(heap_size);
/* Protect the indirect block */
- if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, nrows, par_iblock, par_entry, FALSE, H5AC_READ, &did_protect)))
+ if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, nrows, par_iblock, par_entry, FALSE, H5AC__READ_ONLY_FLAG, &did_protect)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load fractal heap indirect block")
/* Accumulate size of this indirect block */
diff --git a/src/H5HFiter.c b/src/H5HFiter.c
index 137d0ee..262a9ee 100644
--- a/src/H5HFiter.c
+++ b/src/H5HFiter.c
@@ -217,7 +217,7 @@ H5HF_man_iter_start_offset(H5HF_hdr_t *hdr, hid_t dxpl_id,
} /* end else */
/* Load indirect block for this context location */
- if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, iblock_nrows, iblock_parent, iblock_par_entry, FALSE, H5AC_WRITE, &did_protect)))
+ if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, iblock_nrows, iblock_parent, iblock_par_entry, FALSE, H5AC__NO_FLAGS_SET, &did_protect)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap indirect block")
/* Make indirect block the context for the current location */
diff --git a/src/H5HFman.c b/src/H5HFman.c
index 58dab10..5f95a91 100644
--- a/src/H5HFman.c
+++ b/src/H5HFman.c
@@ -161,7 +161,7 @@ H5HF_man_insert(H5HF_hdr_t *hdr, hid_t dxpl_id, size_t obj_size, const void *obj
HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't retrieve direct block information")
/* Lock direct block */
- if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, dblock_addr, dblock_size, sec_node->u.single.parent, sec_node->u.single.par_entry, H5AC_WRITE)))
+ if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, dblock_addr, dblock_size, sec_node->u.single.parent, sec_node->u.single.par_entry, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to load fractal heap direct block")
/* Insert object into block */
@@ -274,7 +274,11 @@ H5HF_man_op_real(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id,
H5HF_operator_t op, void *op_data, unsigned op_flags)
{
H5HF_direct_t *dblock = NULL; /* Pointer to direct block to query */
- H5AC_protect_t dblock_access; /* Access method for direct block */
+ unsigned dblock_access_flags; /* Access flags for direct block */
+ /* must equal either
+ * H5AC__NO_FLAGS_SET or
+ * H5AC__READ_ONLY_FLAG
+ */
haddr_t dblock_addr; /* Direct block address */
size_t dblock_size; /* Direct block size */
unsigned dblock_cache_flags; /* Flags for unprotecting direct block */
@@ -298,11 +302,11 @@ H5HF_man_op_real(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id,
/* Check pipeline */
H5HF_MAN_WRITE_CHECK_PLINE(hdr)
- dblock_access = H5AC_WRITE;
+ dblock_access_flags = H5AC__NO_FLAGS_SET;
dblock_cache_flags = H5AC__DIRTIED_FLAG;
} /* end if */
else {
- dblock_access = H5AC_READ;
+ dblock_access_flags = H5AC__READ_ONLY_FLAG;
dblock_cache_flags = H5AC__NO_FLAGS_SET;
} /* end else */
@@ -332,7 +336,7 @@ H5HF_man_op_real(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id,
dblock_size = hdr->man_dtable.cparam.start_block_size;
/* Lock direct block */
- if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, dblock_addr, dblock_size, NULL, 0, dblock_access)))
+ if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, dblock_addr, dblock_size, NULL, 0, dblock_access_flags)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap direct block")
} /* end if */
else {
@@ -341,7 +345,7 @@ H5HF_man_op_real(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id,
unsigned entry; /* Entry of block */
/* Look up indirect block containing direct block */
- if(H5HF_man_dblock_locate(hdr, dxpl_id, obj_off, &iblock, &entry, &did_protect, H5AC_READ) < 0)
+ if(H5HF_man_dblock_locate(hdr, dxpl_id, obj_off, &iblock, &entry, &did_protect, H5AC__READ_ONLY_FLAG) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTCOMPUTE, FAIL, "can't compute row & column of section")
/* Set direct block info */
@@ -359,7 +363,7 @@ H5HF_man_op_real(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id,
} /* end if */
/* Lock direct block */
- if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, dblock_addr, dblock_size, iblock, entry, dblock_access))) {
+ if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, dblock_addr, dblock_size, iblock, entry, dblock_access_flags))) {
/* Unlock indirect block */
if(H5HF_man_iblock_unprotect(iblock, dxpl_id, H5AC__NO_FLAGS_SET, did_protect) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "unable to release fractal heap indirect block")
@@ -578,7 +582,7 @@ H5HF_man_remove(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id)
} /* end if */
else {
/* Look up indirect block containing direct block */
- if(H5HF_man_dblock_locate(hdr, dxpl_id, obj_off, &iblock, &dblock_entry, &did_protect, H5AC_WRITE) < 0)
+ if(H5HF_man_dblock_locate(hdr, dxpl_id, obj_off, &iblock, &dblock_entry, &did_protect, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTCOMPUTE, FAIL, "can't compute row & column of section")
/* Check for offset of invalid direct block */
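The H5AC_protect_t-to-flags conversion that runs through these files follows one pattern; a sketch under invented names (toy_access_flags(), toy_check_protect_flags(), with TOY_* constants standing in for H5AC__NO_FLAGS_SET and H5AC__READ_ONLY_FLAG):

#include <assert.h>
#include <stdbool.h>

#define TOY_NO_FLAGS_SET    0x0u
#define TOY_READ_ONLY_FLAG  0x8u

/* Pick protect flags from the intended access mode, replacing the old
 * read/write enum: writers pass no flags, readers pass the RO flag. */
static unsigned
toy_access_flags(bool will_modify)
{
    return will_modify ? TOY_NO_FLAGS_SET : TOY_READ_ONLY_FLAG;
}

/* Mirror of the assertion added to the protect routines: nothing but
 * the read-only flag may be passed through by callers. */
static void
toy_check_protect_flags(unsigned flags)
{
    assert((flags & ~TOY_READ_ONLY_FLAG) == 0);
}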
diff --git a/src/H5HFpkg.h b/src/H5HFpkg.h
index 63c1a3e..4655d83 100644
--- a/src/H5HFpkg.h
+++ b/src/H5HFpkg.h
@@ -341,7 +341,7 @@ typedef struct H5HF_hdr_t {
size_t rc; /* Reference count of heap's components using heap header */
haddr_t heap_addr; /* Address of heap header in the file */
size_t heap_size; /* Size of heap header in the file */
- H5AC_protect_t mode; /* Access mode for heap */
+ unsigned mode; /* Access mode for heap */
H5F_t *f; /* Pointer to file for heap */
size_t file_rc; /* Reference count of files using heap header */
hbool_t pending_delete; /* Heap is pending deletion */
@@ -424,6 +424,31 @@ typedef struct H5HF_direct_t {
size_t size; /* Size of direct block */
hsize_t file_size; /* Size of direct block in file (only valid when block's space is being freed) */
uint8_t *blk; /* Pointer to buffer containing block data */
+ uint8_t *write_buf; /* Pointer to buffer containing the block data */
+ /* in a form ready to copy to the metadata */
+ /* cache's image buffer. */
+ /* */
+ /* This field is used by */
+ /* H5HF__cache_dblock_pre_serialize() to pass */
+ /* the serialized image of the direct block to */
+ /* H5HF__cache_dblock_serialize(). It should */
+ /* be NULL at all other times. */
+ /* */
+ /* If I/O filters are enabled, the */
+ /* pre-serialize function will allocate */
+ /* a buffer, copy the filtered version of the */
+ /* direct block image into it, and place the */
+ /* base address of the buffer in this field. */
+ /* The serialize function must discard this */
+ /* buffer after it copies the contents into */
+ /* the image buffer provided by the metadata */
+ /* cache. */
+ /* */
+ /* If I/O filters are not enabled, the */
+ /* write_buf field is simply set equal to the */
+ /* blk field by the pre-serialize function, */
+ /* and back to NULL by the serialize function. */
+ size_t write_size; /* size of the buffer pointed to by write_buf. */
/* Stored values */
hsize_t block_off; /* Offset of the block within the heap's address space */
@@ -597,7 +622,7 @@ H5_DLL hsize_t H5HF_dtable_span_size(const H5HF_dtable_t *dtable, unsigned start
H5_DLL H5HF_hdr_t * H5HF_hdr_alloc(H5F_t *f);
H5_DLL haddr_t H5HF_hdr_create(H5F_t *f, hid_t dxpl_id, const H5HF_create_t *cparam);
H5_DLL H5HF_hdr_t *H5HF_hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr,
- H5AC_protect_t rw);
+ unsigned flags);
H5_DLL herr_t H5HF_hdr_finish_init_phase1(H5HF_hdr_t *hdr);
H5_DLL herr_t H5HF_hdr_finish_init_phase2(H5HF_hdr_t *hdr);
H5_DLL herr_t H5HF_hdr_finish_init(H5HF_hdr_t *hdr);
@@ -638,7 +663,7 @@ H5_DLL herr_t H5HF_man_iblock_create(H5HF_hdr_t *hdr, hid_t dxpl_id,
H5_DLL H5HF_indirect_t *H5HF_man_iblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id,
haddr_t iblock_addr, unsigned iblock_nrows,
H5HF_indirect_t *par_iblock, unsigned par_entry, hbool_t must_protect,
- H5AC_protect_t rw, hbool_t *did_protect);
+ unsigned flags, hbool_t *did_protect);
H5_DLL herr_t H5HF_man_iblock_unprotect(H5HF_indirect_t *iblock, hid_t dxpl_id,
unsigned cache_flags, hbool_t did_protect);
H5_DLL herr_t H5HF_man_iblock_attach(H5HF_indirect_t *iblock, unsigned entry,
@@ -664,10 +689,10 @@ H5_DLL herr_t H5HF_man_dblock_destroy(H5HF_hdr_t *hdr, hid_t dxpl_id,
H5_DLL H5HF_direct_t *H5HF_man_dblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id,
haddr_t dblock_addr, size_t dblock_size,
H5HF_indirect_t *par_iblock, unsigned par_entry,
- H5AC_protect_t rw);
+ unsigned flags);
H5_DLL herr_t H5HF_man_dblock_locate(H5HF_hdr_t *hdr, hid_t dxpl_id,
hsize_t obj_off, H5HF_indirect_t **par_iblock,
- unsigned *par_entry, hbool_t *par_did_protect, H5AC_protect_t rw);
+ unsigned *par_entry, hbool_t *par_did_protect, unsigned flags);
H5_DLL herr_t H5HF_man_dblock_delete(H5F_t *f, hid_t dxpl_id, haddr_t dblock_addr,
hsize_t dblock_size);
H5_DLL herr_t H5HF_man_dblock_dest(H5HF_direct_t *dblock);
diff --git a/src/H5HFsection.c b/src/H5HFsection.c
index 01a7b4a..c997119 100644
--- a/src/H5HFsection.c
+++ b/src/H5HFsection.c
@@ -555,7 +555,7 @@ H5HF_sect_single_locate_parent(H5HF_hdr_t *hdr, hid_t dxpl_id, hbool_t refresh,
HDassert(sect);
/* Look up indirect block containing direct blocks for range */
- if(H5HF_man_dblock_locate(hdr, dxpl_id, sect->sect_info.addr, &sec_iblock, &sec_entry, &did_protect, H5AC_READ) < 0)
+ if(H5HF_man_dblock_locate(hdr, dxpl_id, sect->sect_info.addr, &sec_iblock, &sec_entry, &did_protect, H5AC__READ_ONLY_FLAG) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTCOMPUTE, FAIL, "can't compute row & column of section")
/* Increment reference count on indirect block that free section is in */
@@ -776,7 +776,7 @@ H5HF_sect_single_full_dblock(H5HF_hdr_t *hdr, hid_t dxpl_id,
hdr->man_dtable.curr_root_rows > 0) {
H5HF_direct_t *dblock; /* Pointer to direct block for section */
- if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, dblock_addr, dblock_size, sect->u.single.parent, sect->u.single.par_entry, H5AC_WRITE)))
+ if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, dblock_addr, dblock_size, sect->u.single.parent, sect->u.single.par_entry, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to load fractal heap direct block")
HDassert(H5F_addr_eq(dblock->block_off + dblock_overhead, sect->sect_info.addr));
@@ -1094,7 +1094,7 @@ H5HF_sect_single_shrink(H5FS_section_info_t **_sect, void H5_ATTR_UNUSED *_udata
/* (should be a root direct block) */
HDassert(dblock_addr == hdr->man_dtable.table_addr);
if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, dblock_addr,
- dblock_size, (*sect)->u.single.parent, (*sect)->u.single.par_entry, H5AC_WRITE)))
+ dblock_size, (*sect)->u.single.parent, (*sect)->u.single.par_entry, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to load fractal heap direct block")
HDassert(H5F_addr_eq(dblock->block_off + dblock_size, (*sect)->sect_info.addr + (*sect)->sect_info.size));
@@ -1221,7 +1221,7 @@ H5HF_sect_single_valid(const H5FS_section_class_t H5_ATTR_UNUSED *cls, const H5F
H5HF_direct_t *dblock; /* Direct block for section */
/* Protect the direct block for the section */
- dblock = H5HF_man_dblock_protect(iblock->hdr, H5AC_dxpl_id, dblock_addr, dblock_size, iblock, sect->u.single.par_entry, H5AC_READ);
+ dblock = H5HF_man_dblock_protect(iblock->hdr, H5AC_dxpl_id, dblock_addr, dblock_size, iblock, sect->u.single.par_entry, H5AC__READ_ONLY_FLAG);
HDassert(dblock);
/* Sanity check settings for section */
@@ -2536,7 +2536,7 @@ H5HF_sect_indirect_init_rows(H5HF_hdr_t *hdr, hid_t dxpl_id,
/* If the child indirect block's address is defined, protect it */
if(H5F_addr_defined(child_iblock_addr)) {
- if(NULL == (child_iblock = H5HF_man_iblock_protect(hdr, dxpl_id, child_iblock_addr, child_nrows, sect->u.indirect.u.iblock, curr_entry, FALSE, H5AC_WRITE, &did_protect)))
+ if(NULL == (child_iblock = H5HF_man_iblock_protect(hdr, dxpl_id, child_iblock_addr, child_nrows, sect->u.indirect.u.iblock, curr_entry, FALSE, H5AC__NO_FLAGS_SET, &did_protect)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap indirect block")
} /* end if */
else
@@ -2771,7 +2771,7 @@ H5HF_sect_indirect_revive_row(H5HF_hdr_t *hdr, hid_t dxpl_id, H5HF_free_section_
HDassert(sect->sect_info.state == H5FS_SECT_SERIALIZED);
/* Look up indirect block containing indirect blocks for section */
- if(H5HF_man_dblock_locate(hdr, dxpl_id, sect->sect_info.addr, &sec_iblock, NULL, &did_protect, H5AC_READ) < 0)
+ if(H5HF_man_dblock_locate(hdr, dxpl_id, sect->sect_info.addr, &sec_iblock, NULL, &did_protect, H5AC__READ_ONLY_FLAG) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTCOMPUTE, FAIL, "can't compute row & column of section")
/* Increment reference count on indirect block that free section is in */
diff --git a/src/H5HG.c b/src/H5HG.c
index 21c93bd..4b2eb78 100644
--- a/src/H5HG.c
+++ b/src/H5HG.c
@@ -246,7 +246,7 @@ done:
*-------------------------------------------------------------------------
*/
H5HG_heap_t *
-H5HG_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw)
+H5HG_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, unsigned flags)
{
H5HG_heap_t *heap; /* Global heap */
H5HG_heap_t *ret_value; /* Return value */
@@ -257,8 +257,11 @@ H5HG_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw)
HDassert(f);
HDassert(H5F_addr_defined(addr));
+ /* only H5AC__READ_ONLY_FLAG may appear in flags */
+ HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
+
/* Lock the heap into memory */
- if(NULL == (heap = (H5HG_heap_t *)H5AC_protect(f, dxpl_id, H5AC_GHEAP, addr, f, rw)))
+ if(NULL == (heap = (H5HG_heap_t *)H5AC_protect(f, dxpl_id, H5AC_GHEAP, addr, f, flags)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect global heap")
/* Set the heap's address */
@@ -440,7 +443,7 @@ H5HG_extend(H5F_t *f, hid_t dxpl_id, haddr_t addr, size_t need)
HDassert(H5F_addr_defined(addr));
/* Protect the heap */
- if(NULL == (heap = H5HG_protect(f, dxpl_id, addr, H5AC_WRITE)))
+ if(NULL == (heap = H5HG_protect(f, dxpl_id, addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect global heap")
/* Re-allocate the heap information in memory */
@@ -554,7 +557,7 @@ H5HG_insert(H5F_t *f, hid_t dxpl_id, size_t size, void *obj, H5HG_t *hobj/*out*/
} /* end if */
HDassert(H5F_addr_defined(addr));
- if(NULL == (heap = H5HG_protect(f, dxpl_id, addr, H5AC_WRITE)))
+ if(NULL == (heap = H5HG_protect(f, dxpl_id, addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect global heap")
/* Split the free space to make room for the new object */
@@ -618,7 +621,7 @@ H5HG_read(H5F_t *f, hid_t dxpl_id, H5HG_t *hobj, void *object/*out*/,
HDassert(hobj);
/* Load the heap */
- if(NULL == (heap = H5HG_protect(f, dxpl_id, hobj->addr, H5AC_READ)))
+ if(NULL == (heap = H5HG_protect(f, dxpl_id, hobj->addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect global heap")
HDassert(hobj->idx < heap->nused);
@@ -692,7 +695,7 @@ H5HG_link(H5F_t *f, hid_t dxpl_id, const H5HG_t *hobj, int adjust)
HGOTO_ERROR(H5E_HEAP, H5E_WRITEERROR, FAIL, "no write intent on file")
/* Load the heap */
- if(NULL == (heap = H5HG_protect(f, dxpl_id, hobj->addr, H5AC_WRITE)))
+ if(NULL == (heap = H5HG_protect(f, dxpl_id, hobj->addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect global heap")
if(adjust != 0) {
@@ -744,7 +747,7 @@ H5HG_get_obj_size(H5F_t *f, hid_t dxpl_id, H5HG_t *hobj, size_t *obj_size)
HDassert(obj_size);
/* Load the heap */
- if(NULL == (heap = H5HG_protect(f, dxpl_id, hobj->addr, H5AC_READ)))
+ if(NULL == (heap = H5HG_protect(f, dxpl_id, hobj->addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect global heap")
HDassert(hobj->idx < heap->nused);
@@ -799,7 +802,7 @@ H5HG_remove (H5F_t *f, hid_t dxpl_id, H5HG_t *hobj)
HGOTO_ERROR(H5E_HEAP, H5E_WRITEERROR, FAIL, "no write intent on file")
/* Load the heap */
- if(NULL == (heap = H5HG_protect(f, dxpl_id, hobj->addr, H5AC_WRITE)))
+ if(NULL == (heap = H5HG_protect(f, dxpl_id, hobj->addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect global heap")
HDassert(hobj->idx < heap->nused);
diff --git a/src/H5HGcache.c b/src/H5HGcache.c
index aac73ed..5c6bee1 100644
--- a/src/H5HGcache.c
+++ b/src/H5HGcache.c
@@ -62,12 +62,14 @@
/********************/
/* Metadata cache callbacks */
-static H5HG_heap_t *H5HG_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5HG_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest, haddr_t addr,
- H5HG_heap_t *heap, unsigned H5_ATTR_UNUSED * flags_ptr);
-static herr_t H5HG_dest(H5F_t *f, H5HG_heap_t *heap);
-static herr_t H5HG_clear(H5F_t *f, H5HG_heap_t *heap, hbool_t destroy);
-static herr_t H5HG_size(const H5F_t *f, const H5HG_heap_t *heap, size_t *size_ptr);
+static herr_t H5HG__cache_heap_get_load_size(const void *udata, size_t *image_len);
+static void *H5HG__cache_heap_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5HG__cache_heap_image_len(const void *thing, size_t *image_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static herr_t H5HG__cache_heap_serialize(const H5F_t *f, void *image,
+ size_t len, void *thing);
+static herr_t H5HG__cache_heap_free_icr(void *thing);
/*********************/
@@ -76,13 +78,19 @@ static herr_t H5HG_size(const H5F_t *f, const H5HG_heap_t *heap, size_t *size_pt
/* H5HG inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_GHEAP[1] = {{
- H5AC_GHEAP_ID,
- (H5AC_load_func_t)H5HG_load,
- (H5AC_flush_func_t)H5HG_flush,
- (H5AC_dest_func_t)H5HG_dest,
- (H5AC_clear_func_t)H5HG_clear,
- (H5AC_notify_func_t)NULL,
- (H5AC_size_func_t)H5HG_size,
+ H5AC_GHEAP_ID, /* Metadata client ID */
+ "global heap", /* Metadata client name (for debugging) */
+ H5FD_MEM_GHEAP, /* File space memory type for client */
+ H5AC__CLASS_SPECULATIVE_LOAD_FLAG, /* Client class behavior flags */
+ H5HG__cache_heap_get_load_size, /* 'get_load_size' callback */
+ H5HG__cache_heap_deserialize, /* 'deserialize' callback */
+ H5HG__cache_heap_image_len, /* 'image_len' callback */
+ NULL, /* 'pre_serialize' callback */
+ H5HG__cache_heap_serialize, /* 'serialize' callback */
+ NULL, /* 'notify' callback */
+ H5HG__cache_heap_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
@@ -98,162 +106,204 @@ const H5AC_class_t H5AC_GHEAP[1] = {{
/*-------------------------------------------------------------------------
- * Function: H5HG_load
+ * Function: H5HG__cache_heap_get_load_size()
*
- * Purpose: Loads a global heap collection from disk.
+ * Purpose: Return the initial speculative read size to the metadata
+ * cache. This size will be used in the initial attempt to read
+ * the global heap. If this read is too small, the cache will
+ * try again with the correct value obtained from
+ * H5HG__cache_heap_image_len().
*
- * Return: Success: Ptr to a global heap collection.
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Failure: NULL
+ * Programmer: John Mainzer
+ * 7/27/14
*
- * Programmer: Robb Matzke
- * Friday, March 27, 1998
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HG__cache_heap_get_load_size(const void H5_ATTR_UNUSED *_udata, size_t *image_len)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ HDassert(image_len);
+
+ *image_len = (size_t)H5HG_MINSIZE;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5HG__cache_heap_get_load_size() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HG__cache_heap_deserialize
+ *
+ * Purpose: Given a buffer containing the on disk image of the global
+ * heap, deserialize it, load its contents into a newly allocated
+ * instance of H5HG_heap_t, and return a pointer to the new instance.
+ *
+ * Note that this heap client uses speculative reads. If the supplied
+ * buffer is too small, we simply make note of the correct size, and
+ * wait for the metadata cache to try again.
+ *
+ * Return: Success: Pointer to in core representation
+ * Failure: NULL
+ *
+ * Programmer: John Mainzer
+ * 7/27/14
*
*-------------------------------------------------------------------------
*/
-static H5HG_heap_t *
-H5HG_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata)
+static void *
+H5HG__cache_heap_deserialize(const void *_image, size_t len, void *_udata,
+ hbool_t H5_ATTR_UNUSED *dirty)
{
- H5HG_heap_t *heap = NULL;
- uint8_t *p;
- size_t nalloc, need;
- size_t max_idx = 0; /* The maximum index seen */
- H5HG_heap_t *ret_value = NULL; /* Return value */
+ H5F_t *f = (H5F_t *)_udata; /* File pointer -- obtained from user data */
+ H5HG_heap_t *heap = NULL; /* New global heap */
+ uint8_t *image; /* Pointer to image to decode */
+ void *ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
- /* check arguments */
+ /* Sanity checks */
+ HDassert(_image);
+ HDassert(len >= (size_t)H5HG_MINSIZE);
HDassert(f);
- HDassert(H5F_addr_defined(addr));
- HDassert(udata);
+ HDassert(dirty);
- /* Read the initial 4k page */
+ /* Allocate a new global heap */
if(NULL == (heap = H5FL_CALLOC(H5HG_heap_t)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
heap->shared = H5F_SHARED(f);
- if(NULL == (heap->chunk = H5FL_BLK_MALLOC(gheap_chunk, (size_t)H5HG_MINSIZE)))
+ if(NULL == (heap->chunk = H5FL_BLK_MALLOC(gheap_chunk, len)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
- if(H5F_block_read(f, H5FD_MEM_GHEAP, addr, (size_t)H5HG_MINSIZE, dxpl_id, heap->chunk) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_READERROR, NULL, "unable to read global heap collection")
- p = heap->chunk;
+
+ /* copy the image buffer into the newly allocated chunk */
+ HDmemcpy(heap->chunk, _image, len);
+
+ image = heap->chunk;
/* Magic number */
- if(HDmemcmp(p, H5HG_MAGIC, (size_t)H5_SIZEOF_MAGIC))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "bad global heap collection signature")
- p += H5_SIZEOF_MAGIC;
+ if(HDmemcmp(image, H5HG_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "bad global heap collection signature")
+ image += H5_SIZEOF_MAGIC;
/* Version */
- if(H5HG_VERSION != *p++)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "wrong version number in global heap")
+ if(H5HG_VERSION != *image++)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "wrong version number in global heap")
/* Reserved */
- p += 3;
+ image += 3;
/* Size */
- H5F_DECODE_LENGTH(f, p, heap->size);
+ H5F_DECODE_LENGTH(f, image, heap->size);
HDassert(heap->size >= H5HG_MINSIZE);
-
- /*
- * If we didn't read enough in the first try, then read the rest of the
- * collection now.
- */
- if(heap->size > H5HG_MINSIZE) {
- haddr_t next_addr = addr + (hsize_t)H5HG_MINSIZE;
-
- if(NULL == (heap->chunk = H5FL_BLK_REALLOC(gheap_chunk, heap->chunk, heap->size)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
- if(H5F_block_read(f, H5FD_MEM_GHEAP, next_addr, (heap->size - H5HG_MINSIZE), dxpl_id, heap->chunk + H5HG_MINSIZE) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_READERROR, NULL, "unable to read global heap collection")
- } /* end if */
-
- /* Decode each object */
- p = heap->chunk + H5HG_SIZEOF_HDR(f);
- nalloc = H5HG_NOBJS(f, heap->size);
-
- /* Calloc the obj array because the file format spec makes no guarantee
- * about the order of the objects, and unused slots must be set to zero.
- */
- if(NULL == (heap->obj = H5FL_SEQ_CALLOC(H5HG_obj_t, nalloc)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
-
- heap->nalloc = nalloc;
- while(p < (heap->chunk + heap->size)) {
- if((p + H5HG_SIZEOF_OBJHDR(f)) > (heap->chunk + heap->size)) {
- /*
- * The last bit of space is too tiny for an object header, so we
- * assume that it's free space.
- */
- HDassert(NULL == heap->obj[0].begin);
- heap->obj[0].size = (size_t)(((const uint8_t *)heap->chunk + heap->size) - p);
- heap->obj[0].begin = p;
- p += heap->obj[0].size;
- } /* end if */
- else {
- unsigned idx;
- uint8_t *begin = p;
-
- UINT16DECODE(p, idx);
-
- /* Check if we need more room to store heap objects */
- if(idx >= heap->nalloc) {
- size_t new_alloc; /* New allocation number */
- H5HG_obj_t *new_obj; /* New array of object descriptions */
-
- /* Determine the new number of objects to index */
- new_alloc = MAX(heap->nalloc * 2, (idx + 1));
- HDassert(idx < new_alloc);
-
- /* Reallocate array of objects */
- if(NULL == (new_obj = H5FL_SEQ_REALLOC(H5HG_obj_t, heap->obj, new_alloc)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
-
- /* Clear newly allocated space */
- HDmemset(&new_obj[heap->nalloc], 0, (new_alloc - heap->nalloc) * sizeof(heap->obj[0]));
-
- /* Update heap information */
- heap->nalloc = new_alloc;
- heap->obj = new_obj;
- HDassert(heap->nalloc > heap->nused);
+ HDassert((len == H5HG_MINSIZE) /* first try */ ||
+ ((len == heap->size) && (len > H5HG_MINSIZE))); /* second try */
+
+ if(len == heap->size) { /* proceed with the deserialize */
+ size_t max_idx = 0;
+ size_t nalloc;
+
+ /* Decode each object */
+ image = heap->chunk + H5HG_SIZEOF_HDR(f);
+ nalloc = H5HG_NOBJS(f, heap->size);
+
+ /* Calloc the obj array because the file format spec makes no guarantee
+ * about the order of the objects, and unused slots must be set to zero.
+ */
+ if(NULL == (heap->obj = H5FL_SEQ_CALLOC(H5HG_obj_t, nalloc)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
+ heap->nalloc = nalloc;
+
+ while(image < (heap->chunk + heap->size)) {
+ if((image + H5HG_SIZEOF_OBJHDR(f)) > (heap->chunk + heap->size)) {
+ /*
+ * The last bit of space is too tiny for an object header, so
+ * we assume that it's free space.
+ */
+ HDassert(NULL == heap->obj[0].begin);
+ heap->obj[0].size = (size_t)(((const uint8_t *)heap->chunk + heap->size) - image);
+ heap->obj[0].begin = image;
+ image += heap->obj[0].size;
} /* end if */
-
- UINT16DECODE(p, heap->obj[idx].nrefs);
- p += 4; /*reserved*/
- H5F_DECODE_LENGTH(f, p, heap->obj[idx].size);
- heap->obj[idx].begin = begin;
-
- /*
- * The total storage size includes the size of the object header
- * and is zero padded so the next object header is properly
- * aligned. The entire obj array was calloc'ed, so no need to zero
- * the space here. The last bit of space is the free space object
- * whose size is never padded and already includes the object
- * header.
- */
- if(idx > 0) {
- need = H5HG_SIZEOF_OBJHDR(f) + H5HG_ALIGN(heap->obj[idx].size);
-
- if(idx > max_idx)
- max_idx = idx;
- } /* end if */
- else
- need = heap->obj[idx].size;
- p = begin + need;
- } /* end else */
- } /* end while */
- HDassert(p == heap->chunk + heap->size);
- HDassert(H5HG_ISALIGNED(heap->obj[0].size));
-
- /* Set the next index value to use */
- if(max_idx > 0)
- heap->nused = max_idx + 1;
+ else {
+ size_t need;
+ unsigned idx;
+ uint8_t *begin = image;
+
+ UINT16DECODE(image, idx);
+
+ /* Check if we need more room to store heap objects */
+ if(idx >= heap->nalloc) {
+ size_t new_alloc; /* New allocation number */
+ H5HG_obj_t *new_obj; /* New array of object descriptions */
+
+ /* Determine the new number of objects to index */
+ new_alloc = MAX(heap->nalloc * 2, (idx + 1));
+ HDassert(idx < new_alloc);
+
+ /* Reallocate array of objects */
+ if(NULL == (new_obj = H5FL_SEQ_REALLOC(H5HG_obj_t, heap->obj, new_alloc)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
+
+ /* Clear newly allocated space */
+ HDmemset(&new_obj[heap->nalloc], 0, (new_alloc - heap->nalloc) * sizeof(heap->obj[0]));
+
+ /* Update heap information */
+ heap->nalloc = new_alloc;
+ heap->obj = new_obj;
+ HDassert(heap->nalloc > heap->nused);
+ } /* end if */
+
+ UINT16DECODE(image, heap->obj[idx].nrefs);
+ image += 4; /*reserved*/
+ H5F_DECODE_LENGTH(f, image, heap->obj[idx].size);
+ heap->obj[idx].begin = begin;
+
+ /*
+ * The total storage size includes the size of the object
+ * header and is zero padded so the next object header is
+ * properly aligned. The entire obj array was calloc'ed,
+ * so no need to zero the space here. The last bit of space
+ * is the free space object whose size is never padded and
+ * already includes the object header.
+ */
+ if(idx > 0) {
+ need = H5HG_SIZEOF_OBJHDR(f) + H5HG_ALIGN(heap->obj[idx].size);
+ if(idx > max_idx)
+ max_idx = idx;
+ } /* end if */
+ else
+ need = heap->obj[idx].size;
+
+ image = begin + need;
+ } /* end else */
+ } /* end while */
+
+ HDassert(image == heap->chunk + heap->size);
+ HDassert(H5HG_ISALIGNED(heap->obj[0].size));
+
+ /* Set the next index value to use */
+ if(max_idx > 0)
+ heap->nused = max_idx + 1;
+ else
+ heap->nused = 1;
+
+ HDassert(max_idx < heap->nused);
+
+ /* Add the new heap to the CWFS list for the file */
+ if(H5F_cwfs_add(f, heap) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, NULL, "unable to add global heap collection to file's CWFS")
+ } /* end if ( len == heap->size ) */
else
- heap->nused = 1;
-
- HDassert(max_idx < heap->nused);
-
- /* Add the new heap to the CWFS list for the file */
- if(H5F_cwfs_add(f, heap) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, NULL, "unable to add global heap collection to file's CWFS")
+ /* if len is less than heap size, then the initial speculative
+ * read was too small. In this case we return without reporting
+ * failure. H5C_load_entry() will call H5HG__cache_heap_image_len()
+ * to get the actual read size, and then repeat the read with the
+ * correct size, and call this function a second time.
+ */
+ HDassert(len < heap->size);
ret_value = heap;
@@ -263,154 +313,126 @@ done:
HDONE_ERROR(H5E_HEAP, H5E_CANTFREE, NULL, "unable to destroy global heap collection")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5HG_load() */
+} /* end H5HG__cache_heap_deserialize() */
/*-------------------------------------------------------------------------
- * Function: H5HG_flush
+ * Function: H5HG__cache_heap_image_len
*
- * Purpose: Flushes a global heap collection from memory to disk if it's
- * dirty. Optionally deletes teh heap from memory.
+ * Purpose: Return the on disk image size of the global heap to the
+ * metadata cache via the image_len.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: Robb Matzke
- * Friday, March 27, 1998
+ * Programmer: John Mainzer
+ * 7/27/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5HG_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5HG_heap_t *heap, unsigned H5_ATTR_UNUSED * flags_ptr)
+H5HG__cache_heap_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ const H5HG_heap_t *heap = (const H5HG_heap_t *)_thing;
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC_NOERR
- /* Check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
- HDassert(H5F_addr_eq(addr, heap->addr));
+ /* Sanity checks */
HDassert(heap);
+ HDassert(heap->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(heap->cache_info.type == H5AC_GHEAP);
+ HDassert(heap->size >= H5HG_MINSIZE);
+ HDassert(image_len);
- if(heap->cache_info.is_dirty) {
- if(H5F_block_write(f, H5FD_MEM_GHEAP, addr, heap->size, dxpl_id, heap->chunk) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_WRITEERROR, FAIL, "unable to write global heap collection to file")
- heap->cache_info.is_dirty = FALSE;
- } /* end if */
+ *image_len = heap->size;
- if(destroy)
- if(H5HG_dest(f, heap) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to destroy global heap collection")
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5HG__cache_heap_image_len() */
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5HG_flush() */
+/**************************************/
+/* no H5HG_cache_heap_pre_serialize() */
+/**************************************/
/*-------------------------------------------------------------------------
- * Function: H5HG_dest
+ * Function: H5HG__cache_heap_serialize
+ *
+ * Purpose: Given an appropriately sized buffer and an instance of
+ * H5HG_heap_t, serialize the global heap for writing to file,
+ * and copy the serialized version into the buffer.
*
- * Purpose: Destroys a global heap collection in memory
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: Quincey Koziol
- * Wednesday, January 15, 2003
+ * Programmer: John Mainzer
+ * 7/27/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5HG_dest(H5F_t *f, H5HG_heap_t *heap)
+H5HG__cache_heap_serialize(const H5F_t *f, void *image, size_t len,
+ void *_thing)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ H5HG_heap_t *heap = (H5HG_heap_t *)_thing;
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC_NOERR
- /* Check arguments */
+ HDassert(f);
+ HDassert(image);
HDassert(heap);
+ HDassert(heap->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(heap->cache_info.type == H5AC_GHEAP);
+ HDassert(heap->size == len);
+ HDassert(heap->chunk);
- /* Verify that node is clean */
- HDassert(heap->cache_info.is_dirty == FALSE);
+ /* copy the image into the buffer */
+ HDmemcpy(image, heap->chunk, len);
- /* If we're going to free the space on disk, the address must be valid */
- HDassert(!heap->cache_info.free_file_space_on_destroy || H5F_addr_defined(heap->cache_info.addr));
-
- /* Check for freeing file space for globalheap */
- if(heap->cache_info.free_file_space_on_destroy) {
- /* Release the space on disk */
- /* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_GHEAP, H5AC_dxpl_id, heap->cache_info.addr, (hsize_t)heap->size) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to free global heap")
- } /* end if */
-
- /* Destroy global heap collection */
- if(H5HG_free(heap) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to destroy global heap collection")
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5HG__cache_heap_serialize() */
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5HG_dest() */
+/****************************************/
+/* no H5HG_cache_heap_notify() function */
+/****************************************/
/*-------------------------------------------------------------------------
- * Function: H5HG_clear
+ * Function: H5HG__cache_heap_free_icr
*
- * Purpose: Mark a global heap in memory as non-dirty.
+ * Purpose: Free the in memory representation of the supplied global heap.
*
- * Return: Non-negative on success/Negative on failure
+ * Note: The metadata cache sets the object's cache_info.magic to
+ * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr
+ * callback (checked in assert).
*
- * Programmer: Quincey Koziol
- * Thursday, March 20, 2003
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 7/27/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5HG_clear(H5F_t *f, H5HG_heap_t *heap, hbool_t destroy)
+H5HG__cache_heap_free_icr(void *_thing)
{
+ H5HG_heap_t *heap = (H5HG_heap_t *)_thing;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Sanity checks */
HDassert(heap);
+ HDassert(heap->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC);
+ HDassert(heap->cache_info.type == H5AC_GHEAP);
- /* Mark heap as clean */
- heap->cache_info.is_dirty = FALSE;
-
- if(destroy)
- if(H5HG_dest(f, heap) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to destroy global heap collection")
+ /* Destroy global heap collection */
+ if(H5HG_free(heap) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to destroy global heap collection")
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5HG_clear() */
-
-
-/*-------------------------------------------------------------------------
- * Function: H5HG_size
- *
- * Purpose: Compute the size in bytes of the specified instance of
- * H5HG_heap_t on disk, and return it in *len_ptr. On failure,
- * the value of *len_ptr is undefined.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 5/13/04
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5HG_size(const H5F_t H5_ATTR_UNUSED *f, const H5HG_heap_t *heap, size_t *size_ptr)
-{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- /* Check arguments */
- HDassert(heap);
- HDassert(size_ptr);
-
- *size_ptr = heap->size;
-
- FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5HG_size() */
+} /* end H5HG__cache_heap_free_icr() */
diff --git a/src/H5HGdbg.c b/src/H5HGdbg.c
index 16d8c49..c79aac8 100644
--- a/src/H5HGdbg.c
+++ b/src/H5HGdbg.c
@@ -103,7 +103,7 @@ H5HG_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
HDassert(indent >= 0);
HDassert(fwidth >= 0);
- if(NULL == (h = H5HG_protect(f, dxpl_id, addr, H5AC_READ)))
+ if(NULL == (h = H5HG_protect(f, dxpl_id, addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect global heap collection");
HDfprintf(stream, "%*sGlobal Heap Collection...\n", indent, "");
diff --git a/src/H5HGpkg.h b/src/H5HGpkg.h
index f3546a2..9137e7b 100644
--- a/src/H5HGpkg.h
+++ b/src/H5HGpkg.h
@@ -143,7 +143,7 @@ struct H5HG_heap_t {
/* Package Private Prototypes */
/******************************/
H5_DLL herr_t H5HG_free(H5HG_heap_t *heap);
-H5_DLL H5HG_heap_t *H5HG_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw);
+H5_DLL H5HG_heap_t *H5HG_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, unsigned flags);
#endif /* _H5HGpkg_H */
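Throughout this patch, callers that previously passed an H5AC_protect_t value (H5AC_READ or H5AC_WRITE) to the protect routines now pass an unsigned flags word instead: H5AC__READ_ONLY_FLAG for read-only access, H5AC__NO_FLAGS_SET for read/write access. The standalone sketch below illustrates the caller-side conversion and the sanity check the reworked H5HG_protect()/H5HL_protect() perform; it is not HDF5 code, and the numeric flag values are assumptions for illustration only (the real definitions live in H5ACprivate.h).

/* Illustrative sketch only -- not HDF5 code; flag values are assumed. */
#include <assert.h>
#include <stdio.h>

#define H5AC__NO_FLAGS_SET   0x0000u   /* assumed value; replaces H5AC_WRITE at call sites */
#define H5AC__READ_ONLY_FLAG 0x0200u   /* assumed value; replaces H5AC_READ at call sites  */

static void toy_protect(unsigned flags)
{
    /* Mirrors the new sanity check: only the read-only flag may appear */
    assert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);

    if(flags & H5AC__READ_ONLY_FLAG)
        printf("protect entry for read-only access\n");
    else
        printf("protect entry for read/write access\n");
}

int main(void)
{
    toy_protect(H5AC__READ_ONLY_FLAG);  /* old code passed H5AC_READ  */
    toy_protect(H5AC__NO_FLAGS_SET);    /* old code passed H5AC_WRITE */
    return 0;
}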
diff --git a/src/H5HL.c b/src/H5HL.c
index b1b5b24..eae0482 100644
--- a/src/H5HL.c
+++ b/src/H5HL.c
@@ -441,7 +441,7 @@ done:
*-------------------------------------------------------------------------
*/
H5HL_t *
-H5HL_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw)
+H5HL_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, unsigned flags)
{
H5HL_cache_prfx_ud_t prfx_udata; /* User data for protecting local heap prefix */
H5HL_prfx_t *prfx = NULL; /* Local heap prefix */
@@ -457,14 +457,19 @@ H5HL_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw)
HDassert(f);
HDassert(H5F_addr_defined(addr));
+ /* only the H5AC__READ_ONLY_FLAG may appear in flags */
+ HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
+
/* Construct the user data for protect callback */
+ prfx_udata.made_attempt = FALSE;
prfx_udata.sizeof_size = H5F_SIZEOF_SIZE(f);
prfx_udata.sizeof_addr = H5F_SIZEOF_ADDR(f);
prfx_udata.prfx_addr = addr;
prfx_udata.sizeof_prfx = H5HL_SIZEOF_HDR(f);
+ prfx_udata.loaded = FALSE;
/* Protect the local heap prefix */
- if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, &prfx_udata, rw)))
+ if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, &prfx_udata, flags)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to load heap prefix")
/* Get the pointer to the heap */
@@ -486,7 +491,7 @@ H5HL_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw)
dblk_udata.loaded = FALSE;
/* Protect the local heap data block */
- if(NULL == (dblk = (H5HL_dblk_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_DBLK, heap->dblk_addr, &dblk_udata, rw)))
+ if(NULL == (dblk = (H5HL_dblk_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_DBLK, heap->dblk_addr, &dblk_udata, flags)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to load heap data block")
/* Pin the prefix, if the data block was loaded from file */
@@ -1071,13 +1076,15 @@ H5HL_delete(H5F_t *f, hid_t dxpl_id, haddr_t addr)
HDassert(H5F_addr_defined(addr));
/* Construct the user data for protect callback */
+ prfx_udata.made_attempt = FALSE;
prfx_udata.sizeof_size = H5F_SIZEOF_SIZE(f);
prfx_udata.sizeof_addr = H5F_SIZEOF_ADDR(f);
prfx_udata.prfx_addr = addr;
prfx_udata.sizeof_prfx = H5HL_SIZEOF_HDR(f);
+ prfx_udata.loaded = FALSE;
/* Protect the local heap prefix */
- if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, &prfx_udata, H5AC_WRITE)))
+ if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, &prfx_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to load heap prefix")
/* Get the pointer to the heap */
@@ -1092,7 +1099,7 @@ H5HL_delete(H5F_t *f, hid_t dxpl_id, haddr_t addr)
dblk_udata.loaded = FALSE;
/* Protect the local heap data block */
- if(NULL == (dblk = (H5HL_dblk_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_DBLK, heap->dblk_addr, &dblk_udata, H5AC_WRITE)))
+ if(NULL == (dblk = (H5HL_dblk_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_DBLK, heap->dblk_addr, &dblk_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to load heap data block")
/* Pin the prefix, if the data block was loaded from file */
@@ -1147,13 +1154,15 @@ H5HL_get_size(H5F_t *f, hid_t dxpl_id, haddr_t addr, size_t *size)
HDassert(size);
/* Construct the user data for protect callback */
+ prfx_udata.made_attempt = FALSE;
prfx_udata.sizeof_size = H5F_SIZEOF_SIZE(f);
prfx_udata.sizeof_addr = H5F_SIZEOF_ADDR(f);
prfx_udata.prfx_addr = addr;
prfx_udata.sizeof_prfx = H5HL_SIZEOF_HDR(f);
+ prfx_udata.loaded = FALSE;
/* Protect the local heap prefix */
- if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, &prfx_udata, H5AC_READ)))
+ if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, &prfx_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to load heap prefix")
/* Get the pointer to the heap */
@@ -1199,13 +1208,15 @@ H5HL_heapsize(H5F_t *f, hid_t dxpl_id, haddr_t addr, hsize_t *heap_size)
HDassert(heap_size);
/* Construct the user data for protect callback */
+ prfx_udata.made_attempt = FALSE;
prfx_udata.sizeof_size = H5F_SIZEOF_SIZE(f);
prfx_udata.sizeof_addr = H5F_SIZEOF_ADDR(f);
prfx_udata.prfx_addr = addr;
prfx_udata.sizeof_prfx = H5HL_SIZEOF_HDR(f);
+ prfx_udata.loaded = FALSE;
/* Protect the local heap prefix */
- if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, &prfx_udata, H5AC_READ)))
+ if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, &prfx_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to load heap prefix")
/* Get the pointer to the heap */
diff --git a/src/H5HLcache.c b/src/H5HLcache.c
index b2c764d..17ded00 100644
--- a/src/H5HLcache.c
+++ b/src/H5HLcache.c
@@ -69,18 +69,26 @@
/********************/
/* Metadata cache callbacks */
-static void *H5HL_prefix_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5HL_prefix_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest, haddr_t addr,
- void *thing, unsigned *flags_ptr);
-static herr_t H5HL_prefix_dest(H5F_t *f, void *thing);
-static herr_t H5HL_prefix_clear(H5F_t *f, void *thing, hbool_t destroy);
-static herr_t H5HL_prefix_size(const H5F_t *f, const void *thing, size_t *size_ptr);
-static void *H5HL_datablock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5HL_datablock_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest, haddr_t addr,
- void *thing, unsigned *flags_ptr);
-static herr_t H5HL_datablock_dest(H5F_t *f, void *thing);
-static herr_t H5HL_datablock_clear(H5F_t *f, void *thing, hbool_t destroy);
-static herr_t H5HL_datablock_size(const H5F_t *f, const void *thing, size_t *size_ptr);
+static herr_t H5HL__cache_prefix_get_load_size(const void *udata, size_t *image_len);
+static void *H5HL__cache_prefix_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5HL__cache_prefix_image_len(const void *thing,
+ size_t *image_len, hbool_t *compressed_ptr,
+ size_t *compressed_image_len_ptr);
+static herr_t H5HL__cache_prefix_serialize(const H5F_t *f, void *image,
+ size_t len, void *thing);
+static herr_t H5HL__cache_prefix_free_icr(void *thing);
+
+static herr_t H5HL__cache_datablock_get_load_size(const void *udata,
+ size_t *image_len);
+static void *H5HL__cache_datablock_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5HL__cache_datablock_image_len(const void *thing,
+ size_t *image_len, hbool_t *compressed_ptr,
+ size_t *compressed_image_len_ptr);
+static herr_t H5HL__cache_datablock_serialize(const H5F_t *f, void *image,
+ size_t len, void *thing);
+static herr_t H5HL__cache_datablock_free_icr(void *thing);
/*********************/
@@ -89,23 +97,35 @@ static herr_t H5HL_datablock_size(const H5F_t *f, const void *thing, size_t *siz
/* H5HL inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_LHEAP_PRFX[1] = {{
- H5AC_LHEAP_PRFX_ID,
- H5HL_prefix_load,
- H5HL_prefix_flush,
- H5HL_prefix_dest,
- H5HL_prefix_clear,
- NULL,
- H5HL_prefix_size,
+ H5AC_LHEAP_PRFX_ID, /* Metadata client ID */
+ "local heap prefix", /* Metadata client name (for debugging) */
+ H5FD_MEM_LHEAP, /* File space memory type for client */
+ H5AC__CLASS_SPECULATIVE_LOAD_FLAG, /* Client class behavior flags */
+ H5HL__cache_prefix_get_load_size, /* 'get_load_size' callback */
+ H5HL__cache_prefix_deserialize, /* 'deserialize' callback */
+ H5HL__cache_prefix_image_len, /* 'image_len' callback */
+ NULL, /* 'pre_serialize' callback */
+ H5HL__cache_prefix_serialize, /* 'serialize' callback */
+ NULL, /* 'notify' callback */
+ H5HL__cache_prefix_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
const H5AC_class_t H5AC_LHEAP_DBLK[1] = {{
- H5AC_LHEAP_DBLK_ID,
- H5HL_datablock_load,
- H5HL_datablock_flush,
- H5HL_datablock_dest,
- H5HL_datablock_clear,
- NULL,
- H5HL_datablock_size,
+ H5AC_LHEAP_DBLK_ID, /* Metadata client ID */
+ "local heap datablock", /* Metadata client name (for debugging) */
+ H5FD_MEM_LHEAP, /* File space memory type for client */
+ H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
+ H5HL__cache_datablock_get_load_size,/* 'get_load_size' callback */
+ H5HL__cache_datablock_deserialize, /* 'deserialize' callback */
+ H5HL__cache_datablock_image_len, /* 'image_len' callback */
+ NULL, /* 'pre_serialize' callback */
+ H5HL__cache_datablock_serialize, /* 'serialize' callback */
+ NULL, /* 'notify' callback */
+ H5HL__cache_datablock_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
@@ -149,7 +169,7 @@ H5HL__fl_deserialize(H5HL_t *heap)
/* Build free list */
free_block = heap->free_block;
while(H5HL_FREE_NULL != free_block) {
- const uint8_t *p; /* Pointer into image buffer */
+ const uint8_t *image; /* Pointer into image buffer */
/* Sanity check */
if(free_block >= heap->dblk_size)
@@ -163,13 +183,13 @@ H5HL__fl_deserialize(H5HL_t *heap)
fl->next = NULL;
/* Decode offset of next free block */
- p = heap->dblk_image + free_block;
- H5F_DECODE_LENGTH_LEN(p, free_block, heap->sizeof_size);
+ image = heap->dblk_image + free_block;
+ H5F_DECODE_LENGTH_LEN(image, free_block, heap->sizeof_size);
if(free_block == 0)
HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "free block size is zero?")
/* Decode length of this free block */
- H5F_DECODE_LENGTH_LEN(p, fl->size, heap->sizeof_size);
+ H5F_DECODE_LENGTH_LEN(image, fl->size, heap->sizeof_size);
if((fl->offset + fl->size) > heap->dblk_size)
HGOTO_ERROR(H5E_HEAP, H5E_BADRANGE, FAIL, "bad heap free list")
@@ -217,17 +237,17 @@ H5HL__fl_serialize(const H5HL_t *heap)
/* Serialize the free list into the heap data's image */
for(fl = heap->freelist; fl; fl = fl->next) {
- uint8_t *p; /* Pointer into raw data buffer */
+ uint8_t *image; /* Pointer into raw data buffer */
HDassert(fl->offset == H5HL_ALIGN(fl->offset));
- p = heap->dblk_image + fl->offset;
+ image = heap->dblk_image + fl->offset;
if(fl->next)
- H5F_ENCODE_LENGTH_LEN(p, fl->next->offset, heap->sizeof_size)
+ H5F_ENCODE_LENGTH_LEN(image, fl->next->offset, heap->sizeof_size)
else
- H5F_ENCODE_LENGTH_LEN(p, H5HL_FREE_NULL, heap->sizeof_size)
+ H5F_ENCODE_LENGTH_LEN(image, H5HL_FREE_NULL, heap->sizeof_size)
- H5F_ENCODE_LENGTH_LEN(p, fl->size, heap->sizeof_size)
+ H5F_ENCODE_LENGTH_LEN(image, fl->size, heap->sizeof_size)
} /* end for */
FUNC_LEAVE_NOAPI_VOID
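As the two routines above show, the free list lives inside the heap's data block image itself: at each free block's offset the image stores the offset of the next free block followed by the size of the current one, with H5HL_FREE_NULL terminating the chain. The toy program below walks such a chain the same way H5HL__fl_deserialize() does; the 8-byte little-endian length encoding and the FREE_NULL value of 1 are assumptions chosen for illustration, not HDF5's actual encoding macros.

/* Toy model of the in-block free list; not HDF5 code. */
#include <stdint.h>
#include <stdio.h>

#define TOY_FREE_NULL 1u                    /* stand-in for H5HL_FREE_NULL */

static void put_len(uint8_t *p, uint64_t v) /* assumed little-endian, 8 bytes */
{
    for(int i = 0; i < 8; i++)
        p[i] = (uint8_t)(v >> (8 * i));
}

static uint64_t get_len(const uint8_t *p)
{
    uint64_t v = 0;
    for(int i = 0; i < 8; i++)
        v |= (uint64_t)p[i] << (8 * i);
    return v;
}

int main(void)
{
    uint8_t dblk[256] = {0};

    /* Two free blocks: offset 32 (size 32) chained to offset 128 (size 64) */
    put_len(dblk + 32,  128);               /* offset of next free block */
    put_len(dblk + 40,  32);                /* size of this free block   */
    put_len(dblk + 128, TOY_FREE_NULL);     /* end of chain              */
    put_len(dblk + 136, 64);

    /* Walk the chain the way H5HL__fl_deserialize() does */
    for(uint64_t off = 32; off != TOY_FREE_NULL; ) {
        uint64_t next = get_len(dblk + off);
        uint64_t size = get_len(dblk + off + 8);
        printf("free block at %llu, size %llu\n",
               (unsigned long long)off, (unsigned long long)size);
        off = next;
    }
    return 0;
}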
@@ -235,69 +255,88 @@ H5HL__fl_serialize(const H5HL_t *heap)
/*-------------------------------------------------------------------------
- * Function: H5HL_prefix_load
+ * Function: H5HL__cache_prefix_get_load_size()
*
- * Purpose: Loads a local heap prefix from disk.
+ * Purpose: Return the size of the buffer the metadata cache should
+ * load from file and pass to the deserialize routine.
*
- * Return: Success: Ptr to a local heap memory data structure.
- * Failure: NULL
+ * The version 2 metadata cache callbacks included a test to
+ * ensure that the read did not extend past the end of the file,
+ * but this functionality has been moved to H5C_load_entry(). Thus
+ * all this function does is set *image_len equal to
+ * H5HL_SPEC_READ_SIZE, leaving it to the metadata cache to
+ * reduce the size of the read if appropriate.
*
- * Programmer: Robb Matzke
- * matzke@llnl.gov
- * Jul 17 1997
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 6/21/14
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HL__cache_prefix_get_load_size(const void H5_ATTR_UNUSED *_udata, size_t *image_len)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ HDassert(image_len);
+
+ *image_len = H5HL_SPEC_READ_SIZE;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5HL__cache_prefix_get_load_size() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HL__cache_prefix_deserialize
+ *
+ * Purpose: Given a buffer containing the on disk image of the local
+ * heap prefix, deserialize it, load its contents into a newly allocated
+ * instance of H5HL_prfx_t, and return a pointer to the new instance.
+ *
+ * Return: Success: Pointer to in core representation
+ * Failure: NULL
+ *
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
static void *
-H5HL_prefix_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
+H5HL__cache_prefix_deserialize(const void *_image, size_t len, void *_udata,
+ hbool_t H5_ATTR_UNUSED *dirty)
{
- H5HL_t *heap = NULL; /* Local heap */
- H5HL_prfx_t *prfx = NULL; /* Heap prefix deserialized */
+ H5HL_t *heap = NULL; /* Local heap */
+ H5HL_prfx_t *prfx = NULL; /* Heap prefix deserialized */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into decoding buffer */
H5HL_cache_prfx_ud_t *udata = (H5HL_cache_prfx_ud_t *)_udata; /* User data for callback */
- uint8_t buf[H5HL_SPEC_READ_SIZE]; /* Buffer for decoding */
- size_t spec_read_size; /* Size of buffer to speculatively read in */
- const uint8_t *p; /* Pointer into decoding buffer */
- haddr_t eoa; /* Relative end of file address */
- hsize_t min; /* temp min value to avoid macro nesting */
- H5HL_prfx_t *ret_value; /* Return value */
+ void *ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
- /* check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ /* Check arguments */
+ HDassert(image);
+ HDassert(len > 0);
HDassert(udata);
HDassert(udata->sizeof_size > 0);
HDassert(udata->sizeof_addr > 0);
HDassert(udata->sizeof_prfx > 0);
- HDassert(udata->sizeof_prfx <= sizeof(buf));
-
- /* Make certain we don't speculatively read off the end of the file */
- if(HADDR_UNDEF == (eoa = H5F_get_eoa(f, H5FD_MEM_LHEAP)))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, NULL, "unable to determine file size")
-
- /* Compute the size of the speculative local heap prefix buffer */
- min = MIN(eoa - addr, H5HL_SPEC_READ_SIZE);
- H5_CHECKED_ASSIGN(spec_read_size, size_t, min, hsize_t);
- HDassert(spec_read_size >= udata->sizeof_prfx);
-
- /* Attempt to speculatively read both local heap prefix and heap data */
- if(H5F_block_read(f, H5FD_MEM_LHEAP, addr, spec_read_size, dxpl_id, buf) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_READERROR, NULL, "unable to read local heap prefix")
- p = buf;
+ HDassert(H5F_addr_defined(udata->prfx_addr));
+ HDassert(dirty);
/* Check magic number */
- if(HDmemcmp(p, H5HL_MAGIC, (size_t)H5_SIZEOF_MAGIC))
- HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "bad local heap signature")
- p += H5_SIZEOF_MAGIC;
+ if(HDmemcmp(image, H5HL_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "bad local heap signature")
+ image += H5_SIZEOF_MAGIC;
/* Version */
- if(H5HL_VERSION != *p++)
- HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "wrong version number in local heap")
+ if(H5HL_VERSION != *image++)
+ HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "wrong version number in local heap")
/* Reserved */
- p += 3;
-
+ image += 3;
+
/* Allocate space in memory for the heap */
if(NULL == (heap = H5HL_new(udata->sizeof_size, udata->sizeof_addr, udata->sizeof_prfx)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, NULL, "can't allocate local heap structure")
@@ -311,16 +350,16 @@ H5HL_prefix_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
heap->prfx_size = udata->sizeof_prfx;
/* Heap data size */
- H5F_DECODE_LENGTH_LEN(p, heap->dblk_size, udata->sizeof_size);
+ H5F_DECODE_LENGTH_LEN(image, heap->dblk_size, udata->sizeof_size);
/* Free list head */
- H5F_DECODE_LENGTH_LEN(p, heap->free_block, udata->sizeof_size);
+ H5F_DECODE_LENGTH_LEN(image, heap->free_block, udata->sizeof_size);
if((heap->free_block != H5HL_FREE_NULL) && (heap->free_block >= heap->dblk_size))
HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "bad heap free list")
/* Heap data address */
- H5F_addr_decode_len(udata->sizeof_addr, &p, &(heap->dblk_addr));
+ H5F_addr_decode_len(udata->sizeof_addr, &image, &(heap->dblk_addr));
/* Check if heap block exists */
if(heap->dblk_size) {
@@ -329,35 +368,53 @@ H5HL_prefix_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Note that the heap should be a single object in the cache */
heap->single_cache_obj = TRUE;
- /* Allocate space for the heap data image */
- if(NULL == (heap->dblk_image = H5FL_BLK_MALLOC(lheap_chunk, heap->dblk_size)))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, NULL, "memory allocation failed")
+ /* Check if the current buffer from the speculative read
+ * already has the heap data
+ */
+ if(len >= (heap->prfx_size + heap->dblk_size)) {
+ /* Allocate space for the heap data image */
+ if(NULL == (heap->dblk_image = H5FL_BLK_MALLOC(lheap_chunk, heap->dblk_size)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, NULL, "memory allocation failed")
- /* Check if the current buffer from the speculative read already has the heap data */
- if(spec_read_size >= (heap->prfx_size + heap->dblk_size)) {
- /* Set p to the start of the data block. This is necessary
+ /* Set image to the start of the data block. This is necessary
* because there may be a gap between the used portion of the
* prefix and the data block due to alignment constraints. */
- p = buf + heap->prfx_size;
+ image = ((const uint8_t *)_image) + heap->prfx_size;
/* Copy the heap data from the speculative read buffer */
- HDmemcpy(heap->dblk_image, p, heap->dblk_size);
+ HDmemcpy(heap->dblk_image, image, heap->dblk_size);
+
+ /* Build free list */
+ if(H5HL__fl_deserialize(heap) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, NULL, "can't initialize free list")
} /* end if */
else {
- /* Read the local heap data block directly into buffer */
- if(H5F_block_read(f, H5FD_MEM_LHEAP, heap->dblk_addr, heap->dblk_size, dxpl_id, heap->dblk_image) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_READERROR, NULL, "unable to read heap data")
- } /* end else */
-
- /* Build free list */
- if(H5HL__fl_deserialize(heap) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, NULL, "can't initialize free list")
+ /* The supplied buffer is too small -- we have already made note
+ * of the correct size, so simply return success. H5C_load_entry()
+ * will notice the size discrepancy and retry the load.
+ */
+
+ /* Make certain that this is the first try ... */
+ HDassert(!udata->made_attempt);
+
+ /* ... and mark the udata so that we know that we have used up
+ * our first try.
+ */
+ udata->made_attempt = TRUE;
+ } /* end else */
} /* end if */
- else
- /* Note that the heap should _NOT_ be a single object in the cache */
+ else {
+ /* Note that the heap should _NOT_ be a single
+ * object in the cache
+ */
heap->single_cache_obj = FALSE;
+
+ } /* end else */
} /* end if */
+ /* Set flag to indicate the prefix was loaded from file */
+ udata->loaded = TRUE;
+
/* Set return value */
ret_value = prfx;
@@ -375,273 +432,257 @@ done:
} /* end if */
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5HL_prefix_load() */
+} /* end H5HL__cache_prefix_deserialize() */
/*-------------------------------------------------------------------------
- * Function: H5HL_prefix_flush
+ * Function: H5HL__cache_prefix_image_len
*
- * Purpose: Flushes a heap from memory to disk if it's dirty. Optionally
- * deletes the heap from memory.
+ * Purpose: Return the on disk image size of a local heap prefix to the
+ * metadata cache via the image_len.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: Robb Matzke
- * matzke@llnl.gov
- * Jul 17 1997
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5HL_prefix_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
- void *thing, unsigned H5_ATTR_UNUSED *flags_ptr)
+H5HL__cache_prefix_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- H5HL_prfx_t *prfx = (H5HL_prfx_t *)thing; /* Local heap prefix to flush */
- H5WB_t *wb = NULL; /* Wrapped buffer for heap data */
- uint8_t heap_buf[H5HL_SPEC_READ_SIZE]; /* Buffer for heap */
- herr_t ret_value = SUCCEED; /* Return value */
+ const H5HL_prfx_t *prfx = (const H5HL_prfx_t *)_thing; /* Pointer to local heap prefix to query */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC_NOERR
- /* check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ /* Check arguments */
HDassert(prfx);
+ HDassert(prfx->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(prfx->cache_info.type == H5AC_LHEAP_PRFX);
+ HDassert(image_len);
- if(prfx->cache_info.is_dirty) {
- H5HL_t *heap = prfx->heap; /* Pointer to the local heap */
- uint8_t *buf; /* Pointer to heap buffer */
- size_t buf_size; /* Size of buffer for encoding & writing heap info */
- uint8_t *p; /* Pointer into raw data buffer */
-
- /* Wrap the local buffer for serialized heap info */
- if(NULL == (wb = H5WB_wrap(heap_buf, sizeof(heap_buf))))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, FAIL, "can't wrap buffer")
-
- /* Compute the size of the buffer to encode & write */
- buf_size = heap->prfx_size;
- if(heap->single_cache_obj)
- buf_size += heap->dblk_size;
-
- /* Get a pointer to a buffer that's large enough for serialized heap */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, buf_size)))
- HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "can't get actual buffer")
-
- /* Update the free block value from the free list */
- heap->free_block = heap->freelist ? heap->freelist->offset : H5HL_FREE_NULL;
-
- /* Serialize the heap prefix */
- p = buf;
- HDmemcpy(p, H5HL_MAGIC, (size_t)H5_SIZEOF_MAGIC);
- p += H5_SIZEOF_MAGIC;
- *p++ = H5HL_VERSION;
- *p++ = 0; /*reserved*/
- *p++ = 0; /*reserved*/
- *p++ = 0; /*reserved*/
- H5F_ENCODE_LENGTH_LEN(p, heap->dblk_size, heap->sizeof_size);
- H5F_ENCODE_LENGTH_LEN(p, heap->free_block, heap->sizeof_size);
- H5F_addr_encode_len(heap->sizeof_addr, &p, heap->dblk_addr);
-
- /* Check if the local heap is a single object in cache */
- if(heap->single_cache_obj) {
- if((size_t)(p - buf) < heap->prfx_size) {
- size_t gap; /* Size of gap between prefix and data block */
-
- /* Set p to the start of the data block. This is necessary because
- * there may be a gap between the used portion of the prefix and the
- * data block due to alignment constraints. */
- gap = heap->prfx_size - (size_t)(p - buf);
- HDmemset(p, 0, gap);
- p += gap;
- } /* end if */
-
- /* Serialize the free list into the heap data's image */
- H5HL__fl_serialize(heap);
-
- /* Copy the heap data block into the cache image */
- HDmemcpy(p, heap->dblk_image, heap->dblk_size);
- } /* end if */
-
- /* Write the prefix [and possibly the data block] to the file */
- if(H5F_block_write(f, H5FD_MEM_LHEAP, addr, buf_size, dxpl_id, buf) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_WRITEERROR, FAIL, "unable to write heap header and data to file")
+ /* Set the prefix's size */
+ *image_len = prfx->heap->prfx_size;
- prfx->cache_info.is_dirty = FALSE;
- } /* end if */
+ /* If the heap is stored as a single object, add in the
+ * data block size also
+ */
+ if(prfx->heap->single_cache_obj)
+ *image_len += prfx->heap->dblk_size;
- /* Should we destroy the memory version? */
- if(destroy)
- if(H5HL_prefix_dest(f, prfx) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to destroy local heap prefix")
-done:
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- HDONE_ERROR(H5E_HEAP, H5E_CLOSEERROR, FAIL, "can't close wrapped buffer")
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5HL__cache_prefix_image_len() */
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5HL_prefix_flush() */
+/****************************************/
+/* no H5HL_cache_prefix_pre_serialize() */
+/****************************************/
/*-------------------------------------------------------------------------
- * Function: H5HL_prefix_dest
+ * Function: H5HL__cache_prefix_serialize
*
- * Purpose: Destroys a heap prefix in memory.
+ * Purpose: Given a pointer to an instance of H5HL_prfx_t and an
+ * appropriately sized buffer, serialize the contents of the
+ * instance for writing to disk, and copy the serialized data
+ * into the buffer.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Jan 15 2003
+ * Programmer: John Mainzer
+ * 7/21/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5HL_prefix_dest(H5F_t *f, void *thing)
+H5HL__cache_prefix_serialize(const H5F_t *f, void *_image, size_t len,
+ void *_thing)
{
- H5HL_prfx_t *prfx = (H5HL_prfx_t *)thing; /* Local heap prefix to destroy */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5HL_prfx_t *prfx = (H5HL_prfx_t *)_thing; /* Pointer to local heap prefix to query */
+ H5HL_t *heap; /* Pointer to the local heap */
+ uint8_t *image = (uint8_t *)_image; /* Pointer into image buffer */
+ size_t buf_size; /* expected size of the image buffer */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC_NOERR
- /* check arguments */
+ /* Check arguments */
+ HDassert(f);
+ HDassert(image);
HDassert(prfx);
- HDassert(prfx->heap);
+ HDassert(prfx->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(prfx->cache_info.type == H5AC_LHEAP_PRFX);
HDassert(H5F_addr_eq(prfx->cache_info.addr, prfx->heap->prfx_addr));
+ HDassert(prfx->heap);
- /* Verify that entry is clean */
- HDassert(prfx->cache_info.is_dirty == FALSE);
-
- /* If we're going to free the space on disk, the address must be valid */
- HDassert(!prfx->cache_info.free_file_space_on_destroy || H5F_addr_defined(prfx->cache_info.addr));
+ /* Get the pointer to the heap */
+ heap = prfx->heap;
+ HDassert(heap);
- /* Check for freeing file space for local heap prefix */
- if(prfx->cache_info.free_file_space_on_destroy) {
- hsize_t free_size; /* Size of region to free in file */
+ /* Compute the buffer size */
+ buf_size = heap->prfx_size;
+ if(heap->single_cache_obj)
+ buf_size += heap->dblk_size;
+ HDassert(len == buf_size);
+
+ /* Update the free block value from the free list */
+ heap->free_block = heap->freelist ? heap->freelist->offset : H5HL_FREE_NULL;
+
+ /* Serialize the heap prefix */
+ HDmemcpy(image, H5HL_MAGIC, (size_t)H5_SIZEOF_MAGIC);
+ image += H5_SIZEOF_MAGIC;
+ *image++ = H5HL_VERSION;
+ *image++ = 0; /*reserved*/
+ *image++ = 0; /*reserved*/
+ *image++ = 0; /*reserved*/
+ H5F_ENCODE_LENGTH_LEN(image, heap->dblk_size, heap->sizeof_size);
+ H5F_ENCODE_LENGTH_LEN(image, heap->free_block, heap->sizeof_size);
+ H5F_addr_encode_len(heap->sizeof_addr, &image, heap->dblk_addr);
+
+ /* Check if the local heap is a single object in cache */
+ if(heap->single_cache_obj) {
+ if((size_t)(image - (uint8_t *)_image) < heap->prfx_size) {
+ size_t gap; /* Size of gap between prefix and data block */
+
+ /* Set image to the start of the data block. This is necessary
+ * because there may be a gap between the used portion of
+ * the prefix and the data block due to alignment constraints.
+ */
+ gap = heap->prfx_size - (size_t)(image - (uint8_t *)_image);
+ HDmemset(image, 0, gap);
+ image += gap;
+ } /* end if */
- /* Compute size to free for later */
- free_size = prfx->heap->prfx_size;
- if(prfx->heap->single_cache_obj)
- free_size += prfx->heap->dblk_size;
+ /* Serialize the free list into the heap data's image */
+ H5HL__fl_serialize(heap);
- /* Free the local heap prefix [and possible the data block] on disk */
- /* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_LHEAP, H5AC_dxpl_id, prfx->cache_info.addr, free_size) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to free local heap prefix")
+ /* Copy the heap data block into the cache image */
+ HDmemcpy(image, heap->dblk_image, heap->dblk_size);
} /* end if */
- /* Destroy local heap prefix */
- if(H5HL_prfx_dest(prfx) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTRELEASE, FAIL, "can't destroy local heap prefix")
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5HL__cache_prefix_serialize() */
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5HL_prefix_dest() */
+/******************************************/
+/* no H5HL_cache_prefix_notify() function */
+/******************************************/
/*-------------------------------------------------------------------------
- * Function: H5HL_prefix_clear
+ * Function: H5HL__cache_prefix_free_icr
*
- * Purpose: Mark a local heap prefix in memory as non-dirty.
+ * Purpose: Free the supplied in core representation of a local heap
+ * prefix.
*
- * Return: Non-negative on success/Negative on failure
+ * Note that this function handles the partially initialized prefix
+ * from a failed speculative load attempt. See comments below for
+ * details.
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Mar 20 2003
+ * Note: The metadata cache sets the object's cache_info.magic to
+ * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr
+ * callback (checked in assert).
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5HL_prefix_clear(H5F_t H5_ATTR_UNUSED *f, void *thing, hbool_t destroy)
+H5HL__cache_prefix_free_icr(void *_thing)
{
- H5HL_prfx_t *prfx = (H5HL_prfx_t *)thing; /* The local heap prefix to operate on */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5HL_prfx_t *prfx = (H5HL_prfx_t *)_thing; /* Pointer to local heap prefix to query */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
- /* check arguments */
+ /* Check arguments */
HDassert(prfx);
+ HDassert(prfx->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC);
+ HDassert(prfx->cache_info.type == H5AC_LHEAP_PRFX);
+ HDassert(H5F_addr_eq(prfx->cache_info.addr, prfx->heap->prfx_addr));
- /* Mark heap prefix as clean */
- prfx->cache_info.is_dirty = FALSE;
-
- if(destroy)
- if(H5HL_prefix_dest(f, prfx) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to destroy local heap prefix")
+ /* Destroy local heap prefix */
+ if(H5HL_prfx_dest(prfx) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTRELEASE, FAIL, "can't destroy local heap prefix")
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5HL_prefix_clear() */
+} /* end H5HL__cache_prefix_free_icr() */
/*-------------------------------------------------------------------------
- * Function: H5HL_prefix_size
+ * Function: H5HL__cache_datablock_get_load_size()
*
- * Purpose: Compute the size in bytes of the heap prefix on disk,
- * and return it in *len_ptr. On failure, the value of *len_ptr
- * is undefined.
+ * Purpose: Tell the metadata cache how large a buffer to read from
+ * file when loading a datablock. In this case, we simply look up
+ * the correct value in the user data and return it in *image_len.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: John Mainzer
- * 5/13/04
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5HL_prefix_size(const H5F_t H5_ATTR_UNUSED *f, const void *thing, size_t *size_ptr)
+H5HL__cache_datablock_get_load_size(const void *_udata, size_t *image_len)
{
- const H5HL_prfx_t *prfx = (const H5HL_prfx_t *)thing; /* Pointer to local heap prefix to query */
-
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ const H5HL_cache_dblk_ud_t *udata = (const H5HL_cache_dblk_ud_t *)_udata; /* User data for callback */
- /* check arguments */
- HDassert(prfx);
- HDassert(prfx->heap);
- HDassert(size_ptr);
+ FUNC_ENTER_STATIC_NOERR
- /* Calculate size of prefix in cache */
- *size_ptr = prfx->heap->prfx_size;
+ /* Check arguments */
+ HDassert(udata);
+ HDassert(udata->heap);
+ HDassert(udata->heap->dblk_size > 0);
+ HDassert(image_len);
- /* If the heap is stored as a single object, add in the data block size also */
- if(prfx->heap->single_cache_obj)
- *size_ptr += prfx->heap->dblk_size;
+ *image_len = udata->heap->dblk_size;
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5HL_prefix_size() */
+} /* end H5HL__cache_datablock_get_load_size() */
/*-------------------------------------------------------------------------
- * Function: H5HL_datablock_load
+ * Function: H5HL__cache_datablock_deserialize
*
- * Purpose: Loads a local heap data block from disk.
+ * Purpose: Given a buffer containing the on disk image of a local
+ * heap data block, deserialize it, load its contents into a newly allocated
+ * instance of H5HL_dblk_t, and return a pointer to the new instance.
*
- * Return: Success: Ptr to a local heap data block memory data structure.
- * Failure: NULL
+ * Return: Success: Pointer to in core representation
+ * Failure: NULL
*
- * Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Jan 5 2010
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
static void *
-H5HL_datablock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
+H5HL__cache_datablock_deserialize(const void *image, size_t len, void *_udata,
+ hbool_t H5_ATTR_UNUSED *dirty)
{
- H5HL_dblk_t *dblk = NULL; /* Local heap data block deserialized */
- H5HL_cache_dblk_ud_t *udata = (H5HL_cache_dblk_ud_t *)_udata; /* User data for callback */
- H5HL_dblk_t *ret_value; /* Return value */
+ H5HL_dblk_t *dblk = NULL; /* Local heap data block deserialized */
+ H5HL_cache_dblk_ud_t *udata = (H5HL_cache_dblk_ud_t *)_udata; /* User data for callback */
+ void * ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ HDassert(image);
+ HDassert(len > 0);
HDassert(udata);
HDassert(udata->heap);
+ HDassert(udata->heap->dblk_size == len);
HDassert(!udata->heap->single_cache_obj);
HDassert(NULL == udata->heap->dblk);
+ HDassert(dirty);
/* Allocate space in memory for the heap data block */
if(NULL == (dblk = H5HL_dblk_new(udata->heap)))
@@ -653,9 +694,8 @@ H5HL_datablock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
if(NULL == (udata->heap->dblk_image = H5FL_BLK_MALLOC(lheap_chunk, udata->heap->dblk_size)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, NULL, "can't allocate data block image buffer")
- /* Read local heap data block */
- if(H5F_block_read(f, H5FD_MEM_LHEAP, udata->heap->dblk_addr, udata->heap->dblk_size, dxpl_id, udata->heap->dblk_image) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_READERROR, NULL, "unable to read local heap data block")
+ /* copy the datablock from the read buffer */
+ HDmemcpy(udata->heap->dblk_image, image, len);
/* Build free list */
if(H5HL__fl_deserialize(udata->heap) < 0)
@@ -675,180 +715,134 @@ done:
HDONE_ERROR(H5E_HEAP, H5E_CANTRELEASE, NULL, "unable to destroy local heap data block")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5HL_datablock_load() */
+} /* end H5HL__cache_datablock_deserialize() */
/*-------------------------------------------------------------------------
- * Function: H5HL_datablock_flush
+ * Function: H5HL__cache_datablock_image_len
*
- * Purpose: Flushes a heap's data block from memory to disk if it's dirty.
- * Optionally deletes the heap data block from memory.
+ * Purpose: Return the size of the on disk image of the datablock.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: Robb Matzke
- * matzke@llnl.gov
- * Jul 17 1997
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5HL_datablock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
- void *_thing, unsigned H5_ATTR_UNUSED * flags_ptr)
+H5HL__cache_datablock_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- H5HL_dblk_t *dblk = (H5HL_dblk_t *)_thing; /* Pointer to the local heap data block */
- herr_t ret_value = SUCCEED; /* Return value */
+ const H5HL_dblk_t *dblk = (const H5HL_dblk_t *)_thing; /* Pointer to the local heap data block */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC_NOERR
- /* check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ /* Check arguments */
HDassert(dblk);
+ HDassert(dblk->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(dblk->cache_info.type == H5AC_LHEAP_DBLK);
HDassert(dblk->heap);
- HDassert(!dblk->heap->single_cache_obj);
-
- if(dblk->cache_info.is_dirty) {
- H5HL_t *heap = dblk->heap; /* Pointer to the local heap */
-
- /* Update the free block value from the free list */
- heap->free_block = heap->freelist ? heap->freelist->offset : H5HL_FREE_NULL;
+ HDassert(dblk->heap->dblk_size > 0);
+ HDassert(image_len);
- /* Serialize the free list into the heap data's image */
- H5HL__fl_serialize(heap);
-
- /* Write the data block to the file */
- if(H5F_block_write(f, H5FD_MEM_LHEAP, heap->dblk_addr, heap->dblk_size, dxpl_id, heap->dblk_image) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_WRITEERROR, FAIL, "unable to write heap data block to file")
-
- dblk->cache_info.is_dirty = FALSE;
- } /* end if */
+ *image_len = dblk->heap->dblk_size;
- /* Should we destroy the memory version? */
- if(destroy)
- if(H5HL_datablock_dest(f, dblk) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to destroy local heap data block")
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5HL__cache_datablock_image_len() */
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5HL_datablock_flush() */
+/*******************************************/
+/* no H5HL_cache_datablock_pre_serialize() */
+/*******************************************/
/*-------------------------------------------------------------------------
- * Function: H5HL_datablock_dest
+ * Function: H5HL__cache_datablock_serialize
*
- * Purpose: Destroys a local heap data block in memory.
+ * Purpose: Serialize the supplied datablock, and copy the serialized
+ * image into the supplied image buffer.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Jan 15 2003
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5HL_datablock_dest(H5F_t *f, void *_thing)
+H5HL__cache_datablock_serialize(const H5F_t *f, void *image, size_t len,
+ void *_thing)
{
- H5HL_dblk_t *dblk = (H5HL_dblk_t *)_thing; /* Pointer to the local heap data block */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5HL_t *heap; /* Pointer to the local heap */
+ H5HL_dblk_t *dblk = (H5HL_dblk_t *)_thing; /* Pointer to the local heap data block */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC_NOERR
/* Check arguments */
+ HDassert(f);
+ HDassert(image);
HDassert(dblk);
+ HDassert(dblk->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(dblk->cache_info.type == H5AC_LHEAP_DBLK);
HDassert(dblk->heap);
- HDassert(!dblk->heap->single_cache_obj);
- HDassert(H5F_addr_eq(dblk->cache_info.addr, dblk->heap->dblk_addr));
+ heap = dblk->heap;
+ HDassert(heap->dblk_size == len);
+ HDassert(!heap->single_cache_obj);
- /* Verify that entry is clean */
- HDassert(dblk->cache_info.is_dirty == FALSE);
+ /* Update the free block value from the free list */
+ heap->free_block = heap->freelist ? heap->freelist->offset : H5HL_FREE_NULL;
- /* If we're going to free the space on disk, the address must be valid */
- HDassert(!dblk->cache_info.free_file_space_on_destroy || H5F_addr_defined(dblk->cache_info.addr));
+ /* Serialize the free list into the heap data's image */
+ H5HL__fl_serialize(heap);
- /* Check for freeing file space for local heap data block */
- if(dblk->cache_info.free_file_space_on_destroy) {
- /* Free the local heap data block on disk */
- /* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_LHEAP, H5AC_dxpl_id, dblk->cache_info.addr, (hsize_t)dblk->heap->dblk_size) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to free local heap data block")
- } /* end if */
+ /* Copy the heap's data block into the cache's image */
+ HDmemcpy(image, heap->dblk_image, heap->dblk_size);
- /* Destroy local heap data block */
- if(H5HL_dblk_dest(dblk) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTRELEASE, FAIL, "can't destroy local heap data block")
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5HL__cache_datablock_serialize() */
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5HL_datablock_dest() */
+/*********************************************/
+/* no H5HL_cache_datablock_notify() function */
+/*********************************************/
/*-------------------------------------------------------------------------
- * Function: H5HL_datablock_clear
+ * Function: H5HL__cache_datablock_free_icr
*
- * Purpose: Mark a local heap data block in memory as non-dirty.
+ * Purpose: Free the in memory representation of the supplied local
+ * heap data block.
*
- * Return: Non-negative on success/Negative on failure
+ * Note: The metadata cache sets the object's cache_info.magic to
+ * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr
+ * callback (checked in assert).
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Mar 20 2003
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 6/21/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5HL_datablock_clear(H5F_t *f, void *_thing, hbool_t destroy)
+H5HL__cache_datablock_free_icr(void *_thing)
{
H5HL_dblk_t *dblk = (H5HL_dblk_t *)_thing; /* Pointer to the local heap data block */
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Check arguments */
HDassert(dblk);
+ HDassert(dblk->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC);
+ HDassert(dblk->cache_info.type == H5AC_LHEAP_DBLK);
- /* Mark local heap data block as clean */
- dblk->cache_info.is_dirty = FALSE;
-
- if(destroy)
- if(H5HL_datablock_dest(f, dblk) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to destroy local heap data block")
+ /* Destroy the data block */
+ if(H5HL_dblk_dest(dblk) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to destroy local heap data block")
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5HL_datablock_clear() */
-
-
-/*-------------------------------------------------------------------------
- * Function: H5HL_datablock_size
- *
- * Purpose: Compute the size in bytes of the local heap data block on disk,
- * and return it in *len_ptr. On failure, the value of *len_ptr
- * is undefined.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 5/13/04
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5HL_datablock_size(const H5F_t H5_ATTR_UNUSED *f, const void *_thing, size_t *size_ptr)
-{
- const H5HL_dblk_t *dblk = (const H5HL_dblk_t *)_thing; /* Pointer to the local heap data block */
-
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- /* check arguments */
- HDassert(dblk);
- HDassert(dblk->heap);
- HDassert(size_ptr);
-
- /* Set size of data block in cache */
- *size_ptr = dblk->heap->dblk_size;
-
- FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5HL_datablock_size() */
+} /* end H5HL__cache_datablock_free_icr() */
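The prefix callbacks above depend on the cache's speculative-load protocol: get_load_size always asks for H5HL_SPEC_READ_SIZE, and when that first buffer turns out to be smaller than prefix plus data block, the deserialize callback records the miss in udata->made_attempt and leaves it to H5C_load_entry() to re-read using the size reported by image_len. The sketch below simulates that single retry; it is an illustration of the protocol described in the comments, not the cache's actual code, and every name and size in it is made up.

/* Toy model of the speculative-read retry; not HDF5 code. */
#include <stdio.h>
#include <stddef.h>

#define TOY_SPEC_READ_SIZE 512u             /* stand-in for H5HL_SPEC_READ_SIZE */

typedef struct {
    size_t prfx_size;                       /* serialized prefix size          */
    size_t dblk_size;                       /* data block stored right after   */
    int    made_attempt;                    /* mirrors udata->made_attempt     */
} toy_heap_t;

static size_t toy_image_len(const toy_heap_t *h)
{
    return h->prfx_size + h->dblk_size;     /* single-cache-object image size  */
}

static size_t toy_load(toy_heap_t *h)
{
    size_t read_size = TOY_SPEC_READ_SIZE;  /* what get_load_size() reports    */

    if(toy_image_len(h) > read_size) {      /* first buffer too small          */
        h->made_attempt = 1;                /* deserialize notes the miss      */
        read_size = toy_image_len(h);       /* cache re-reads the full image   */
    }
    return read_size;
}

int main(void)
{
    toy_heap_t small = { 32,  128, 0 };     /* fits in the speculative read    */
    toy_heap_t big   = { 32, 4096, 0 };     /* forces a second read            */

    printf("small heap: read %zu bytes, retried=%d\n", toy_load(&small), small.made_attempt);
    printf("big heap:   read %zu bytes, retried=%d\n", toy_load(&big),   big.made_attempt);
    return 0;
}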
diff --git a/src/H5HLdbg.c b/src/H5HLdbg.c
index 4ac22b8..edddd65 100644
--- a/src/H5HLdbg.c
+++ b/src/H5HLdbg.c
@@ -69,7 +69,7 @@ H5HL_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream, int indent, int
HDassert(indent >= 0);
HDassert(fwidth >= 0);
- if(NULL == (h = (H5HL_t *)H5HL_protect(f, dxpl_id, addr, H5AC_READ)))
+ if(NULL == (h = (H5HL_t *)H5HL_protect(f, dxpl_id, addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load heap")
HDfprintf(stream, "%*sLocal Heap...\n", indent, "");
diff --git a/src/H5HLpkg.h b/src/H5HLpkg.h
index 880cc05..75c9959 100644
--- a/src/H5HLpkg.h
+++ b/src/H5HLpkg.h
@@ -122,12 +122,16 @@ struct H5HL_prfx_t {
/* Callback information for loading local heap prefix from disk */
typedef struct H5HL_cache_prfx_ud_t {
/* Downwards */
+ hbool_t made_attempt; /* Whether the deserialize routine */
+ /* was already attempted */
size_t sizeof_size; /* Size of file sizes */
size_t sizeof_addr; /* Size of file addresses */
haddr_t prfx_addr; /* Address of prefix */
size_t sizeof_prfx; /* Size of heap prefix */
/* Upwards */
+ hbool_t loaded; /* Whether prefix was loaded */
+ /* from file */
} H5HL_cache_prfx_ud_t;
/* Callback information for loading local heap data block from disk */
diff --git a/src/H5HLprivate.h b/src/H5HLprivate.h
index 0b044b6..3035689 100644
--- a/src/H5HLprivate.h
+++ b/src/H5HLprivate.h
@@ -61,7 +61,7 @@ typedef struct H5HL_t H5HL_t;
* Library prototypes...
*/
H5_DLL herr_t H5HL_create(H5F_t *f, hid_t dxpl_id, size_t size_hint, haddr_t *addr/*out*/);
-H5_DLL H5HL_t *H5HL_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw);
+H5_DLL H5HL_t *H5HL_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, unsigned flags);
H5_DLL void *H5HL_offset_into(const H5HL_t *heap, size_t offset);
H5_DLL herr_t H5HL_remove(H5F_t *f, hid_t dxpl_id, H5HL_t *heap, size_t offset,
size_t size);
diff --git a/src/H5O.c b/src/H5O.c
index 22086fa..d0ace29 100644
--- a/src/H5O.c
+++ b/src/H5O.c
@@ -1652,7 +1652,7 @@ done:
*-------------------------------------------------------------------------
*/
H5O_t *
-H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, H5AC_protect_t prot)
+H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, unsigned prot_flags)
{
H5O_t *oh = NULL; /* Object header protected */
H5O_cache_ud_t udata; /* User data for protecting object header */
@@ -1666,13 +1666,16 @@ H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, H5AC_protect_t prot)
HDassert(loc);
HDassert(loc->file);
+ /* prot_flags may only contain the H5AC__READ_ONLY_FLAG */
+ HDassert((prot_flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
+
/* Check for valid address */
if(!H5F_addr_defined(loc->addr))
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "address undefined")
/* Check for write access on the file */
file_intent = H5F_INTENT(loc->file);
- if((prot == H5AC_WRITE) && (0 == (file_intent & H5F_ACC_RDWR)))
+ if((0 == (prot_flags & H5AC__READ_ONLY_FLAG)) && (0 == (file_intent & H5F_ACC_RDWR)))
HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "no write intent on file")
/* Construct the user data for protect callback */
@@ -1688,7 +1691,7 @@ H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, H5AC_protect_t prot)
udata.common.addr = loc->addr;
/* Lock the object header into the cache */
- if(NULL == (oh = (H5O_t *)H5AC_protect(loc->file, dxpl_id, H5AC_OHDR, loc->addr, &udata, prot)))
+ if(NULL == (oh = (H5O_t *)H5AC_protect(loc->file, dxpl_id, H5AC_OHDR, loc->addr, &udata, prot_flags)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to load object header")
/* Check if there are any continuation messages to process */
@@ -1725,7 +1728,7 @@ H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, H5AC_protect_t prot)
/* (which adds to the object header) */
chk_udata.common.addr = cont_msg_info.msgs[curr_msg].addr;
chk_udata.size = cont_msg_info.msgs[curr_msg].size;
- if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(loc->file, dxpl_id, H5AC_OHDR_CHK, cont_msg_info.msgs[curr_msg].addr, &chk_udata, prot)))
+ if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(loc->file, dxpl_id, H5AC_OHDR_CHK, cont_msg_info.msgs[curr_msg].addr, &chk_udata, prot_flags)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to load object header chunk")
/* Sanity check */
@@ -1769,7 +1772,7 @@ H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, H5AC_protect_t prot)
/* (object header will have been marked dirty during protect, if we
* have write access -QAK)
*/
- if(prot != H5AC_WRITE)
+ if((prot_flags & H5AC__READ_ONLY_FLAG) != 0)
oh->prefix_modified = TRUE;
#ifndef NDEBUG
else {
@@ -1787,7 +1790,7 @@ H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, H5AC_protect_t prot)
} /* end if */
/* Check for any messages that were modified while being read in */
- if(udata.common.mesgs_modified && prot != H5AC_WRITE)
+ if(udata.common.mesgs_modified && (0 == (prot_flags & H5AC__READ_ONLY_FLAG)))
oh->mesgs_modified = TRUE;
/* Reset the field that contained chunk 0's size during speculative load */
@@ -1797,7 +1800,7 @@ H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, H5AC_protect_t prot)
/* Take care of loose ends for modifications made while bringing in the
* object header & chunks.
*/
- if(prot == H5AC_WRITE) {
+ if(0 == (prot_flags & H5AC__READ_ONLY_FLAG)) {
/* Check for the object header prefix being modified somehow */
/* (usually through updating the # of object header messages) */
if(oh->prefix_modified) {
@@ -1883,7 +1886,7 @@ H5O_pin(const H5O_loc_t *loc, hid_t dxpl_id)
HDassert(loc);
/* Get header */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_WRITE)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to protect object header")
/* Increment the reference count on the object header */
@@ -2096,7 +2099,7 @@ H5O_touch(const H5O_loc_t *loc, hbool_t force, hid_t dxpl_id)
HDassert(loc);
/* Get the object header */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_WRITE)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Create/Update the modification time message */
@@ -2212,7 +2215,7 @@ H5O_delete(H5F_t *f, hid_t dxpl_id, haddr_t addr)
loc.holding_file = FALSE;
/* Get the object header information */
- if(NULL == (oh = H5O_protect(&loc, dxpl_id, H5AC_WRITE)))
+ if(NULL == (oh = H5O_protect(&loc, dxpl_id, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Delete object */
@@ -2296,7 +2299,7 @@ H5O_obj_type(const H5O_loc_t *loc, H5O_type_t *obj_type, hid_t dxpl_id)
FUNC_ENTER_NOAPI_TAG(dxpl_id, loc->addr, FAIL)
/* Load the object header */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Retrieve the type of the object */
@@ -2374,7 +2377,7 @@ H5O_obj_class(const H5O_loc_t *loc, hid_t dxpl_id)
FUNC_ENTER_NOAPI_NOINIT_TAG(dxpl_id, loc->addr, NULL)
/* Load the object header */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to load object header")
/* Test whether entry qualifies as a particular type of object */
@@ -2671,7 +2674,7 @@ H5O_get_hdr_info(const H5O_loc_t *loc, hid_t dxpl_id, H5O_hdr_info_t *hdr)
HDmemset(hdr, 0, sizeof(*hdr));
/* Get the object header */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, FAIL, "unable to load object header")
/* Get the information for the object header */
@@ -2795,7 +2798,7 @@ H5O_get_info(const H5O_loc_t *loc, hid_t dxpl_id, hbool_t want_ih_info,
HDassert(oinfo);
/* Get the object header */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Reset the object info structure */
@@ -2916,7 +2919,7 @@ H5O_get_create_plist(const H5O_loc_t *loc, hid_t dxpl_id, H5P_genplist_t *oc_pli
HDassert(oc_plist);
/* Get the object header */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Set property values, if they were used for the object */
@@ -2971,7 +2974,7 @@ H5O_get_nlinks(const H5O_loc_t *loc, hid_t dxpl_id, hsize_t *nlinks)
HDassert(nlinks);
/* Get the object header */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Retrieve the # of link messages seen when the object header was loaded */
@@ -3088,7 +3091,7 @@ H5O_get_rc_and_type(const H5O_loc_t *loc, hid_t dxpl_id, unsigned *rc, H5O_type_
HDassert(loc);
/* Get the object header */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Set the object's reference count */
@@ -3461,7 +3464,7 @@ H5O_dec_rc_by_loc(const H5O_loc_t *loc, hid_t dxpl_id)
HDassert(loc);
/* Get header */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header")
/* Decrement the reference count on the object header */
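With the H5AC_protect_t enum gone, H5O_protect() infers write intent from the absence of H5AC__READ_ONLY_FLAG in prot_flags and requires the file to have been opened with H5F_ACC_RDWR in that case. A minimal sketch of that check follows; it is not HDF5 code, and the macro values are assumptions for illustration.

/* Sketch of the write-intent check in the reworked H5O_protect(); values assumed. */
#include <stdio.h>

#define H5AC__READ_ONLY_FLAG 0x0200u    /* assumed */
#define H5F_ACC_RDWR         0x0002u    /* assumed */

static int check_intent(unsigned prot_flags, unsigned file_intent)
{
    /* Requesting write access against a file opened read-only is an error */
    if((0 == (prot_flags & H5AC__READ_ONLY_FLAG)) && (0 == (file_intent & H5F_ACC_RDWR))) {
        fprintf(stderr, "no write intent on file\n");
        return -1;
    }
    return 0;
}

int main(void)
{
    check_intent(H5AC__READ_ONLY_FLAG, 0);          /* ok: read-only protect */
    check_intent(0, H5F_ACC_RDWR);                  /* ok: writable file     */
    return (check_intent(0, 0) == -1) ? 0 : 1;      /* error path exercised  */
}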
diff --git a/src/H5Oattribute.c b/src/H5Oattribute.c
index 46e7ce4..52f414b 100644
--- a/src/H5Oattribute.c
+++ b/src/H5Oattribute.c
@@ -483,7 +483,7 @@ H5O_attr_open_by_name(const H5O_loc_t *loc, const char *name, hid_t dxpl_id)
HDassert(name);
/* Protect the object header to iterate over */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTPROTECT, NULL, "unable to load object header")
/* Check for attribute info stored */
@@ -632,7 +632,7 @@ H5O_attr_open_by_idx(const H5O_loc_t *loc, H5_index_t idx_type,
HGOTO_ERROR(H5E_ATTR, H5E_BADITER, NULL, "can't locate attribute")
/* Protect the object header to iterate over */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTPROTECT, NULL, "unable to load object header")
/* Find out whether it has already been opened. If it has, close the object
@@ -1283,7 +1283,7 @@ H5O_attr_iterate_real(hid_t loc_id, const H5O_loc_t *loc, hid_t dxpl_id,
HDassert(attr_op);
/* Protect the object header to iterate over */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Check for attribute info stored */
@@ -1846,7 +1846,7 @@ H5O_attr_exists(const H5O_loc_t *loc, const char *name, hid_t dxpl_id)
HDassert(name);
/* Protect the object header to iterate over */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Check for attribute info stored */
@@ -2000,7 +2000,7 @@ H5O_attr_count(const H5O_loc_t *loc, hid_t dxpl_id)
HDassert(loc);
/* Protect the object header to iterate over */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Retrieve # of attributes on object */
diff --git a/src/H5Ocache.c b/src/H5Ocache.c
index 816e06a..db1bd1f 100644
--- a/src/H5Ocache.c
+++ b/src/H5Ocache.c
@@ -68,17 +68,25 @@
/********************/
/* Metadata cache callbacks */
-static H5O_t *H5O_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5O_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5O_t *oh, unsigned H5_ATTR_UNUSED * flags_ptr);
-static herr_t H5O_dest(H5F_t *f, H5O_t *oh);
-static herr_t H5O_clear(H5F_t *f, H5O_t *oh, hbool_t destroy);
-static herr_t H5O_size(const H5F_t *f, const H5O_t *oh, size_t *size_ptr);
-
-static H5O_chunk_proxy_t *H5O_cache_chk_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5O_cache_chk_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5O_chunk_proxy_t *chk_proxy, unsigned H5_ATTR_UNUSED * flags_ptr);
-static herr_t H5O_cache_chk_dest(H5F_t *f, H5O_chunk_proxy_t *chk_proxy);
-static herr_t H5O_cache_chk_clear(H5F_t *f, H5O_chunk_proxy_t *chk_proxy, hbool_t destroy);
-static herr_t H5O_cache_chk_size(const H5F_t *f, const H5O_chunk_proxy_t *chk_proxy, size_t *size_ptr);
+static herr_t H5O__cache_get_load_size(const void *udata, size_t *image_len);
+static void *H5O__cache_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5O__cache_image_len(const void *thing, size_t *image_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static herr_t H5O__cache_serialize(const H5F_t *f, void *image, size_t len,
+ void *thing);
+static herr_t H5O__cache_free_icr(void *thing);
+static herr_t H5O__cache_clear(const H5F_t *f, void *thing, hbool_t about_to_destroy);
+
+static herr_t H5O__cache_chk_get_load_size(const void *udata, size_t *image_len);
+static void *H5O__cache_chk_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5O__cache_chk_image_len(const void *thing, size_t *image_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static herr_t H5O__cache_chk_serialize(const H5F_t *f, void *image, size_t len,
+ void *thing);
+static herr_t H5O__cache_chk_free_icr(void *thing);
+static herr_t H5O__cache_chk_clear(const H5F_t *f, void *thing, hbool_t about_to_destroy);
/* Chunk proxy routines */
static herr_t H5O__chunk_proxy_dest(H5O_chunk_proxy_t *chunk_proxy);
@@ -99,24 +107,36 @@ static herr_t H5O__add_cont_msg(H5O_cont_msgs_t *cont_msg_info,
/* H5O object header prefix inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_OHDR[1] = {{
- H5AC_OHDR_ID,
- (H5AC_load_func_t)H5O_load,
- (H5AC_flush_func_t)H5O_flush,
- (H5AC_dest_func_t)H5O_dest,
- (H5AC_clear_func_t)H5O_clear,
- (H5AC_notify_func_t)NULL,
- (H5AC_size_func_t)H5O_size,
+ H5AC_OHDR_ID, /* Metadata client ID */
+ "object header", /* Metadata client name (for debugging) */
+ H5FD_MEM_OHDR, /* File space memory type for client */
+ H5AC__CLASS_SPECULATIVE_LOAD_FLAG, /* Client class behavior flags */
+ H5O__cache_get_load_size, /* 'get_load_size' callback */
+ H5O__cache_deserialize, /* 'deserialize' callback */
+ H5O__cache_image_len, /* 'image_len' callback */
+ NULL, /* 'pre_serialize' callback */
+ H5O__cache_serialize, /* 'serialize' callback */
+ NULL, /* 'notify' callback */
+ H5O__cache_free_icr, /* 'free_icr' callback */
+ H5O__cache_clear, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
/* H5O object header chunk inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_OHDR_CHK[1] = {{
- H5AC_OHDR_CHK_ID,
- (H5AC_load_func_t)H5O_cache_chk_load,
- (H5AC_flush_func_t)H5O_cache_chk_flush,
- (H5AC_dest_func_t)H5O_cache_chk_dest,
- (H5AC_clear_func_t)H5O_cache_chk_clear,
- (H5AC_notify_func_t)NULL,
- (H5AC_size_func_t)H5O_cache_chk_size,
+ H5AC_OHDR_CHK_ID, /* Metadata client ID */
+ "object header continuation chunk", /* Metadata client name (for debugging) */
+ H5FD_MEM_OHDR, /* File space memory type for client */
+ H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
+ H5O__cache_chk_get_load_size, /* 'get_load_size' callback */
+ H5O__cache_chk_deserialize, /* 'deserialize' callback */
+ H5O__cache_chk_image_len, /* 'image_len' callback */
+ NULL, /* 'pre_serialize' callback */
+ H5O__cache_chk_serialize, /* 'serialize' callback */
+ NULL, /* 'notify' callback */
+ H5O__cache_chk_free_icr, /* 'free_icr' callback */
+ H5O__cache_chk_clear, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
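The two class tables above now follow the version-3 metadata cache client interface: each client publishes an ID, a debugging name, a file-space memory type, behavior flags, and a fixed slate of callbacks in place of the old load/flush/dest/clear/size set. As a rough, self-contained illustration of that contract -- every name below is hypothetical and not part of the HDF5 API -- a minimal client and its callback table might look like this:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for the cache client callback slots */
typedef int   (*get_load_size_fn)(const void *udata, size_t *image_len);
typedef void *(*deserialize_fn)(const void *image, size_t len, void *udata, int *dirty);
typedef int   (*image_len_fn)(const void *thing, size_t *image_len);
typedef int   (*serialize_fn)(void *image, size_t len, void *thing);
typedef int   (*free_icr_fn)(void *thing);

typedef struct cache_class_t {
    const char       *name;             /* client name, for debugging  */
    unsigned          flags;            /* client behavior flags       */
    get_load_size_fn  get_load_size;    /* size of initial read        */
    deserialize_fn    deserialize;      /* image -> in-core object     */
    image_len_fn      image_len;        /* actual on-disk size         */
    serialize_fn      serialize;        /* in-core object -> image     */
    free_icr_fn       free_icr;         /* discard in-core object      */
} cache_class_t;

/* Toy client: a fixed-size counter record */
typedef struct counter_t { uint32_t count; } counter_t;

static int counter_get_load_size(const void *udata, size_t *image_len)
{ (void)udata; *image_len = sizeof(uint32_t); return 0; }

static void *counter_deserialize(const void *image, size_t len, void *udata, int *dirty)
{
    counter_t *c;

    (void)len; (void)udata; *dirty = 0;
    if(NULL == (c = malloc(sizeof(*c))))
        return NULL;
    memcpy(&c->count, image, sizeof(uint32_t));     /* decode the on-disk image */
    return c;
}

static int counter_image_len(const void *thing, size_t *image_len)
{ (void)thing; *image_len = sizeof(uint32_t); return 0; }

static int counter_serialize(void *image, size_t len, void *thing)
{
    counter_t *c = (counter_t *)thing;

    (void)len;
    memcpy(image, &c->count, sizeof(uint32_t));     /* encode into the cache's buffer */
    return 0;
}

static int counter_free_icr(void *thing) { free(thing); return 0; }

static const cache_class_t COUNTER_CLASS = {
    "toy counter", 0u,
    counter_get_load_size, counter_deserialize,
    counter_image_len, counter_serialize, counter_free_icr
};

int main(void)
{
    uint32_t image = 7;            /* pretend this is the on-disk image */
    size_t   len   = 0;
    int      dirty = 0;
    counter_t *c;

    COUNTER_CLASS.get_load_size(NULL, &len);
    if(NULL == (c = (counter_t *)COUNTER_CLASS.deserialize(&image, len, NULL, &dirty)))
        return 1;
    c->count++;                                             /* mutate the in-core object */
    COUNTER_CLASS.serialize(&image, len, c);
    COUNTER_CLASS.free_icr(c);
    printf("round-tripped value: %u\n", (unsigned)image);   /* prints 8 */
    return 0;
}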
/* Declare external the free list for H5O_unknown_t's */
@@ -141,55 +161,75 @@ H5FL_SEQ_DEFINE(H5O_cont_t);
/*-------------------------------------------------------------------------
- * Function: H5O_load
+ * Function: H5O__cache_get_load_size()
*
- * Purpose: Loads an object header from disk.
+ * Purpose: Tell the metadata cache how much data to read from file in
+ * the first speculative read for the object header. Note that we do
+ * not have to be concerned about reading past the end of file, as the
+ * cache will clamp the read to avoid this if needed.
*
- * Return: Success: Pointer to the new object header.
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Failure: NULL
+ * Programmer: John Mainzer
+ * 7/28/14
*
- * Programmer: Robb Matzke
- * matzke@llnl.gov
- * Aug 5 1997
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5O__cache_get_load_size(const void H5_ATTR_UNUSED *_udata, size_t *image_len)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check arguments */
+ HDassert(image_len);
+
+ *image_len = H5O_SPEC_READ_SIZE;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5O__cache_get_load_size() */
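H5O__cache_get_load_size() can return H5O_SPEC_READ_SIZE unconditionally because, as its comment notes, the cache clamps a speculative read to the end of allocated file space -- the clamping the old H5O_load() did itself with MIN(eoa - addr, H5O_SPEC_READ_SIZE). A self-contained sketch of that arithmetic, with illustrative names only:

#include <stdint.h>
#include <stdio.h>

/* Clamp a speculative read so it never extends past the end of allocation (eoa). */
static size_t clamp_speculative_read(uint64_t addr, uint64_t eoa, size_t spec_size)
{
    uint64_t avail = (eoa > addr) ? (eoa - addr) : 0;

    return (avail < (uint64_t)spec_size) ? (size_t)avail : spec_size;
}

int main(void)
{
    /* Header at offset 4096, allocation ends at 4200, speculative size 512 */
    printf("%zu\n", clamp_speculative_read(4096, 4200, 512));   /* prints 104 */
    return 0;
}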
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O__cache_deserialize
+ *
+ * Purpose: Attempt to deserialize the object header contained in the
+ * supplied buffer, load the data into an instance of H5O_t, and
+ * return a pointer to the new instance.
+ *
+ * Note that the object header is read with a speculative read.
+ * If the initial read is too small, make note of this fact and return
+ * without error. H5C_load_entry() will note the size discrepancy
+ * and retry the deserialize operation with the correct size read.
+ *
+ * Return: Success: Pointer to in core representation
+ * Failure: NULL
+ *
+ * Programmer: John Mainzer
+ * 7/28/14
*
*-------------------------------------------------------------------------
*/
-static H5O_t *
-H5O_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
+static void *
+H5O__cache_deserialize(const void *_image, size_t len, void *_udata,
+ hbool_t *dirty)
{
- H5O_t *oh = NULL; /* Object header read in */
- H5O_cache_ud_t *udata = (H5O_cache_ud_t *)_udata; /* User data for callback */
- H5WB_t *wb = NULL; /* Wrapped buffer for prefix data */
- uint8_t read_buf[H5O_SPEC_READ_SIZE]; /* Buffer for speculative read */
- const uint8_t *p; /* Pointer into buffer to decode */
- uint8_t *buf; /* Buffer to decode */
- size_t spec_read_size; /* Size of buffer to speculatively read in */
- size_t prefix_size; /* Size of object header prefix */
- size_t buf_size; /* Size of prefix+chunk #0 buffer */
- haddr_t eoa; /* Relative end of file address */
- H5O_t *ret_value; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT
+ H5O_t *oh = NULL; /* Object header read in */
+ H5O_cache_ud_t *udata = (H5O_cache_ud_t *)_udata; /* User data for callback */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into buffer to decode */
+ size_t prefix_size; /* Size of object header prefix */
+ size_t buf_size; /* Size of prefix+chunk #0 buffer */
+ void * ret_value = NULL; /* Return value */
+
+ FUNC_ENTER_STATIC
/* Check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ HDassert(image);
+ HDassert(len > 0);
HDassert(udata);
HDassert(udata->common.f);
HDassert(udata->common.cont_msg_info);
-
- /* Make certain we don't speculatively read off the end of the file */
- if(HADDR_UNDEF == (eoa = H5F_get_eoa(f, H5FD_MEM_OHDR)))
- HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, NULL, "unable to determine file size")
-
- /* Compute the size of the speculative object header buffer */
- H5_CHECKED_ASSIGN(spec_read_size, size_t, MIN(eoa - addr, H5O_SPEC_READ_SIZE), hsize_t);
-
- /* Attempt to speculatively read both object header prefix and first chunk */
- if(H5F_block_read(f, H5FD_MEM_OHDR, addr, spec_read_size, dxpl_id, read_buf) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_READERROR, NULL, "unable to read object header")
- p = read_buf;
+ HDassert(dirty);
/* Allocate space for the object header data structure */
if(NULL == (oh = H5FL_CALLOC(H5O_t)))
@@ -201,18 +241,18 @@ H5O_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Check for presence of magic number */
/* (indicates version 2 or later) */
- if(!HDmemcmp(p, H5O_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC)) {
+ if(!HDmemcmp(image, H5O_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC)) {
/* Magic number */
- p += H5_SIZEOF_MAGIC;
+ image += H5_SIZEOF_MAGIC;
/* Version */
- oh->version = *p++;
- if(H5O_VERSION_2 != oh->version)
+ oh->version = *image++;
+ if(H5O_VERSION_2 != oh->version)
HGOTO_ERROR(H5E_OHDR, H5E_VERSION, NULL, "bad object header version number")
/* Flags */
- oh->flags = *p++;
- if(oh->flags & ~H5O_HDR_ALL_FLAGS)
+ oh->flags = *image++;
+ if(oh->flags & ~H5O_HDR_ALL_FLAGS)
HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "unknown object header status flag(s)")
/* Number of links to object (unless overridden by refcount message) */
@@ -222,13 +262,13 @@ H5O_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
if(oh->flags & H5O_HDR_STORE_TIMES) {
uint32_t tmp; /* Temporary value */
- UINT32DECODE(p, tmp);
+ UINT32DECODE(image, tmp);
oh->atime = (time_t)tmp;
- UINT32DECODE(p, tmp);
+ UINT32DECODE(image, tmp);
oh->mtime = (time_t)tmp;
- UINT32DECODE(p, tmp);
+ UINT32DECODE(image, tmp);
oh->ctime = (time_t)tmp;
- UINT32DECODE(p, tmp);
+ UINT32DECODE(image, tmp);
oh->btime = (time_t)tmp;
} /* end if */
else
@@ -236,8 +276,9 @@ H5O_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Attribute fields */
if(oh->flags & H5O_HDR_ATTR_STORE_PHASE_CHANGE) {
- UINT16DECODE(p, oh->max_compact);
- UINT16DECODE(p, oh->min_dense);
+ UINT16DECODE(image, oh->max_compact);
+ UINT16DECODE(image, oh->min_dense);
+
if(oh->max_compact < oh->min_dense)
HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "bad object header attribute phase change values")
} /* end if */
@@ -249,19 +290,19 @@ H5O_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* First chunk size */
switch(oh->flags & H5O_HDR_CHUNK0_SIZE) {
case 0: /* 1 byte size */
- oh->chunk0_size = *p++;
+ oh->chunk0_size = *image++;
break;
case 1: /* 2 byte size */
- UINT16DECODE(p, oh->chunk0_size);
+ UINT16DECODE(image, oh->chunk0_size);
break;
case 2: /* 4 byte size */
- UINT32DECODE(p, oh->chunk0_size);
+ UINT32DECODE(image, oh->chunk0_size);
break;
case 3: /* 8 byte size */
- UINT64DECODE(p, oh->chunk0_size);
+ UINT64DECODE(image, oh->chunk0_size);
break;
default:
@@ -272,7 +313,7 @@ H5O_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
} /* end if */
else {
/* Version */
- oh->version = *p++;
+ oh->version = *image++;
if(H5O_VERSION_1 != oh->version)
HGOTO_ERROR(H5E_OHDR, H5E_VERSION, NULL, "bad object header version number")
@@ -280,13 +321,13 @@ H5O_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
oh->flags = H5O_CRT_OHDR_FLAGS_DEF;
/* Reserved */
- p++;
+ image++;
/* Number of messages */
- UINT16DECODE(p, udata->v1_pfx_nmesgs);
+ UINT16DECODE(image, udata->v1_pfx_nmesgs);
/* Link count */
- UINT32DECODE(p, oh->nlink);
+ UINT32DECODE(image, oh->nlink);
/* Reset unused time fields */
oh->atime = oh->mtime = oh->ctime = oh->btime = 0;
@@ -296,45 +337,36 @@ H5O_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
oh->min_dense = 0;
/* First chunk size */
- UINT32DECODE(p, oh->chunk0_size);
- if((udata->v1_pfx_nmesgs > 0 && oh->chunk0_size < H5O_SIZEOF_MSGHDR_OH(oh)) ||
- (udata->v1_pfx_nmesgs == 0 && oh->chunk0_size > 0))
+ UINT32DECODE(image, oh->chunk0_size);
+
+ if((udata->v1_pfx_nmesgs > 0 &&
+ oh->chunk0_size < H5O_SIZEOF_MSGHDR_OH(oh)) ||
+ (udata->v1_pfx_nmesgs == 0 && oh->chunk0_size > 0))
HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "bad object header chunk size")
/* Reserved, in version 1 (for 8-byte alignment padding) */
- p += 4;
+ image += 4;
} /* end else */
/* Determine object header prefix length */
- prefix_size = (size_t)(p - (const uint8_t *)read_buf);
+ prefix_size = (size_t)(image - (const uint8_t *)_image);
HDassert((size_t)prefix_size == (size_t)(H5O_SIZEOF_HDR(oh) - H5O_SIZEOF_CHKSUM_OH(oh)));
/* Compute the size of the buffer used */
buf_size = oh->chunk0_size + (size_t)H5O_SIZEOF_HDR(oh);
- /* Check if the speculative read was large enough to parse the first chunk */
- if(spec_read_size < buf_size) {
- /* Wrap the local buffer for serialized header info */
- if(NULL == (wb = H5WB_wrap(read_buf, sizeof(read_buf))))
- HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, NULL, "can't wrap buffer")
-
- /* Get a pointer to a buffer that's large enough for serialized header */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, buf_size)))
- HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "can't get actual buffer")
-
- /* Copy existing raw data into new buffer */
- HDmemcpy(buf, read_buf, spec_read_size);
-
- /* Read rest of the raw data */
- if(H5F_block_read(f, H5FD_MEM_OHDR, (addr + spec_read_size), (buf_size - spec_read_size), dxpl_id, (buf + spec_read_size)) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_READERROR, NULL, "unable to read object header data")
+ /* Check to see if the buffer provided is large enough to contain both
+ * the prefix and the first chunk. If it isn't, make note of the desired
+ * size, but otherwise do nothing. H5C_load_entry() will notice the
+ * discrepancy, load the correct size buffer, and retry the deserialize.
+ */
+ if(len >= buf_size) {
+ /* Parse the first chunk */
+ if(H5O__chunk_deserialize(oh, udata->common.addr, oh->chunk0_size, (const uint8_t *)_image, &(udata->common), dirty) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, NULL, "can't deserialize first object header chunk")
} /* end if */
else
- buf = read_buf;
-
- /* Parse the first chunk */
- if(H5O__chunk_deserialize(oh, udata->common.addr, oh->chunk0_size, buf, &(udata->common), &oh->cache_info.is_dirty) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, NULL, "can't deserialize first object header chunk")
+ HDassert(!udata->made_attempt);
/* Note that we've loaded the object header from the file */
udata->made_attempt = TRUE;
@@ -343,202 +375,234 @@ H5O_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
ret_value = oh;
done:
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- HDONE_ERROR(H5E_OHDR, H5E_CLOSEERROR, NULL, "can't close wrapped buffer")
-
/* Release the [possibly partially initialized] object header on errors */
if(!ret_value && oh)
if(H5O_free(oh) < 0)
HDONE_ERROR(H5E_OHDR, H5E_CANTRELEASE, NULL, "unable to destroy object header data")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5O_load() */
+} /* end H5O__cache_deserialize() */
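The deserialize callback above deliberately succeeds even when the speculative read was too small: it decodes only the prefix, image_len then reports the true prefix-plus-chunk-0 size, and the cache re-reads and calls deserialize a second time. A self-contained sketch of that round trip (all names hypothetical; the real protocol lives in H5C_load_entry()):

#include <stddef.h>
#include <stdio.h>

/* One speculative load: read `have` bytes, let the client report how many it
 * actually needs, and retry the deserialize once if the first guess was short. */
static void speculative_load(size_t have, size_t need)
{
    if(need > have) {
        printf("short read: had %zu bytes, need %zu -- re-reading exact size\n", have, need);
        have = need;            /* second, exact-size read of the image */
        /* ...second deserialize call on the full image would go here... */
    }
    printf("deserialize finished against a %zu-byte image\n", have);
}

int main(void)
{
    speculative_load(512, 812);     /* prefix + chunk 0 larger than the guess */
    speculative_load(512, 300);     /* first guess already covered everything */
    return 0;
}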
/*-------------------------------------------------------------------------
- * Function: H5O_flush
+ * Function: H5O__cache_image_len
*
- * Purpose: Flushes (and destroys) an object header.
+ * Purpose: Compute the size in bytes of the specified instance of
+ * H5O_t on disk, and return it in *image_len. On failure,
+ * the value of *image_len is undefined.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: Robb Matzke
- * matzke@llnl.gov
- * Aug 5 1997
+ * Programmer: John Mainzer
+ * 7/28/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5O_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t H5_ATTR_UNUSED addr, H5O_t *oh, unsigned H5_ATTR_UNUSED * flags_ptr)
+H5O__cache_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ const H5O_t *oh = (const H5O_t *)_thing; /* Object header to query */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC_NOERR
/* Check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
HDassert(oh);
+ HDassert(oh->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(oh->cache_info.type == H5AC_OHDR);
+ HDassert(image_len);
+
+ /* Report the object header's prefix+first chunk length */
+ if(oh->chunk0_size)
+ *image_len = (size_t)H5O_SIZEOF_HDR(oh) + oh->chunk0_size;
+ else
+ *image_len = oh->chunk[0].size;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5O__cache_image_len() */
- /* flush */
- if(oh->cache_info.is_dirty) {
- uint8_t *p; /* Pointer to object header prefix buffer */
+/********************************/
+/* no H5O_cache_pre_serialize() */
+/********************************/
+
+/*-------------------------------------------------------------------------
+ * Function: H5O__cache_serialize
+ *
+ * Purpose: Serialize the contents of the supplied object header, and
+ * load this data into the supplied buffer.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 7/28/14
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5O__cache_serialize(const H5F_t *f, void *image, size_t len, void *_thing)
+{
+ H5O_t *oh = (H5O_t *)_thing; /* Object header to encode */
+ uint8_t *chunk_image; /* Pointer to object header prefix buffer */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Check arguments */
+ HDassert(f);
+ HDassert(image);
+ HDassert(oh);
+ HDassert(oh->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(oh->cache_info.type == H5AC_OHDR);
+ HDassert(oh->chunk[0].size == len);
#ifdef H5O_DEBUG
-H5O_assert(oh);
+ H5O_assert(oh);
#endif /* H5O_DEBUG */
- /* Point to raw data 'image' for first chunk, which has room for the prefix */
- p = oh->chunk[0].image;
-
- /* Later versions of object header prefix have different format and
- * also require that chunk 0 always be updated, since the checksum
- * on the entire block of memory needs to be updated if anything is
- * modified */
- if(oh->version > H5O_VERSION_1) {
- uint64_t chunk0_size; /* Size of chunk 0's data */
+ /* Point to raw data 'image' for first chunk, which
+ * has room for the prefix
+ */
+ chunk_image = oh->chunk[0].image;
- HDassert(oh->chunk[0].size >= (size_t)H5O_SIZEOF_HDR(oh));
- chunk0_size = oh->chunk[0].size - (size_t)H5O_SIZEOF_HDR(oh);
+ /* Later versions of object header prefix have different format and
+ * also require that chunk 0 always be updated, since the checksum
+ * on the entire block of memory needs to be updated if anything is
+ * modified
+ */
+ if(oh->version > H5O_VERSION_1) {
+ uint64_t chunk0_size; /* Size of chunk 0's data */
- /* Verify magic number */
- HDassert(!HDmemcmp(p, H5O_HDR_MAGIC, H5_SIZEOF_MAGIC));
- p += H5_SIZEOF_MAGIC;
+ HDassert(oh->chunk[0].size >= (size_t)H5O_SIZEOF_HDR(oh));
+ chunk0_size = oh->chunk[0].size - (size_t)H5O_SIZEOF_HDR(oh);
- /* Version */
- *p++ = oh->version;
+ /* Verify magic number */
+ HDassert(!HDmemcmp(chunk_image, H5O_HDR_MAGIC, H5_SIZEOF_MAGIC));
+ chunk_image += H5_SIZEOF_MAGIC;
- /* Flags */
- *p++ = oh->flags;
+ /* Version */
+ *chunk_image++ = oh->version;
- /* Time fields */
- if(oh->flags & H5O_HDR_STORE_TIMES) {
- UINT32ENCODE(p, oh->atime);
- UINT32ENCODE(p, oh->mtime);
- UINT32ENCODE(p, oh->ctime);
- UINT32ENCODE(p, oh->btime);
- } /* end if */
+ /* Flags */
+ *chunk_image++ = oh->flags;
- /* Attribute fields */
- if(oh->flags & H5O_HDR_ATTR_STORE_PHASE_CHANGE) {
- UINT16ENCODE(p, oh->max_compact);
- UINT16ENCODE(p, oh->min_dense);
- } /* end if */
+ /* Time fields */
+ if(oh->flags & H5O_HDR_STORE_TIMES) {
+ UINT32ENCODE(chunk_image, oh->atime);
+ UINT32ENCODE(chunk_image, oh->mtime);
+ UINT32ENCODE(chunk_image, oh->ctime);
+ UINT32ENCODE(chunk_image, oh->btime);
+ } /* end if */
- /* First chunk size */
- switch(oh->flags & H5O_HDR_CHUNK0_SIZE) {
- case 0: /* 1 byte size */
- HDassert(chunk0_size < 256);
- *p++ = (uint8_t)chunk0_size;
- break;
-
- case 1: /* 2 byte size */
- HDassert(chunk0_size < 65536);
- UINT16ENCODE(p, chunk0_size);
- break;
-
- case 2: /* 4 byte size */
- /* use <= 2**32 -1 to stay within 4 bytes integer range */
- HDassert(chunk0_size <= 4294967295UL);
- UINT32ENCODE(p, chunk0_size);
- break;
-
- case 3: /* 8 byte size */
- UINT64ENCODE(p, chunk0_size);
- break;
-
- default:
- HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "bad size for chunk 0")
- } /* end switch */
+ /* Attribute fields */
+ if(oh->flags & H5O_HDR_ATTR_STORE_PHASE_CHANGE) {
+ UINT16ENCODE(chunk_image, oh->max_compact);
+ UINT16ENCODE(chunk_image, oh->min_dense);
} /* end if */
- else {
- /* Version */
- *p++ = oh->version;
- /* Reserved */
- *p++ = 0;
+ /* First chunk size */
+ switch(oh->flags & H5O_HDR_CHUNK0_SIZE) {
+ case 0: /* 1 byte size */
+ HDassert(chunk0_size < 256);
+ *chunk_image++ = (uint8_t)chunk0_size;
+ break;
+
+ case 1: /* 2 byte size */
+ HDassert(chunk0_size < 65536);
+ UINT16ENCODE(chunk_image, chunk0_size);
+ break;
+
+ case 2: /* 4 byte size */
+ /* use <= 2**32 -1 to stay within 4 bytes integer range */
+ HDassert(chunk0_size <= 4294967295UL);
+ UINT32ENCODE(chunk_image, chunk0_size);
+ break;
- /* Number of messages */
+ case 3: /* 8 byte size */
+ UINT64ENCODE(chunk_image, chunk0_size);
+ break;
+
+ default:
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "bad size for chunk 0")
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Version */
+ *chunk_image++ = oh->version;
+
+ /* Reserved */
+ *chunk_image++ = 0;
+
+ /* Number of messages */
#ifdef H5O_ENABLE_BAD_MESG_COUNT
- if(oh->store_bad_mesg_count)
- UINT16ENCODE(p, (oh->nmesgs - 1))
- else
+ if(oh->store_bad_mesg_count)
+ UINT16ENCODE(chunk_image, (oh->nmesgs - 1))
+ else
#endif /* H5O_ENABLE_BAD_MESG_COUNT */
- UINT16ENCODE(p, oh->nmesgs);
-
- /* Link count */
- UINT32ENCODE(p, oh->nlink);
+ UINT16ENCODE(chunk_image, oh->nmesgs);
- /* First chunk size */
- UINT32ENCODE(p, (oh->chunk[0].size - (size_t)H5O_SIZEOF_HDR(oh)));
+ /* Link count */
+ UINT32ENCODE(chunk_image, oh->nlink);
- /* Zero to alignment */
- HDmemset(p, 0, (size_t)(H5O_SIZEOF_HDR(oh) - 12));
- p += (size_t)(H5O_SIZEOF_HDR(oh) - 12);
- } /* end else */
- HDassert((size_t)(p - oh->chunk[0].image) == (size_t)(H5O_SIZEOF_HDR(oh) - H5O_SIZEOF_CHKSUM_OH(oh)));
+ /* First chunk size */
+ UINT32ENCODE(chunk_image, (oh->chunk[0].size - (size_t)H5O_SIZEOF_HDR(oh)));
- /* Serialize messages for this chunk */
- if(H5O__chunk_serialize(f, oh, (unsigned)0) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTSERIALIZE, FAIL, "unable to serialize first object header chunk")
+ /* Zero to alignment */
+ HDmemset(chunk_image, 0, (size_t)(H5O_SIZEOF_HDR(oh) - 12));
+ chunk_image += (size_t)(H5O_SIZEOF_HDR(oh) - 12);
+ } /* end else */
- /* Write the chunk out */
- HDassert(H5F_addr_defined(oh->chunk[0].addr));
- if(H5F_block_write(f, H5FD_MEM_OHDR, oh->chunk[0].addr, oh->chunk[0].size, dxpl_id, oh->chunk[0].image) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_WRITEERROR, FAIL, "unable to write object header chunk to disk")
+ HDassert((size_t)(chunk_image - oh->chunk[0].image) == (size_t)(H5O_SIZEOF_HDR(oh) - H5O_SIZEOF_CHKSUM_OH(oh)));
- /* Mark object header as clean now */
- oh->cache_info.is_dirty = FALSE;
- } /* end if */
+ /* Serialize messages for this chunk */
+ if(H5O__chunk_serialize(f, oh, (unsigned)0) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTSERIALIZE, FAIL, "unable to serialize first object header chunk")
- /* Destroy the object header, if requested */
- if(destroy)
- if(H5O_dest(f, oh) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to destroy object header data")
+ /* Copy the chunk into the image -- this is potentially expensive.
+ * Can we rework things so that the object header and the cache
+ * share a buffer?
+ */
+ HDmemcpy(image, oh->chunk[0].image, len);
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5O_flush() */
+} /* end H5O__cache_serialize() */
+
+/**********************************/
+/* no H5O_cache_notify() function */
+/**********************************/
/*-------------------------------------------------------------------------
- * Function: H5O_dest
+ * Function: H5O__cache_free_icr
*
- * Purpose: Destroys an object header.
+ * Purpose: Free the in core representation of the supplied object header.
*
- * Return: Non-negative on success/Negative on failure
+ * Note: The metadata cache sets the object's cache_info.magic to
+ * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr
+ * callback (checked in assert).
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Jan 15 2003
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 7/28/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5O_dest(H5F_t *f, H5O_t *oh)
-{
- herr_t ret_value = SUCCEED; /* Return value */
+H5O__cache_free_icr(void *_thing)
+{
+ H5O_t *oh = (H5O_t *)_thing; /* Object header to destroy */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Check arguments */
HDassert(oh);
- HDassert(oh->rc == 0);
-
- /* Verify that node is clean */
- HDassert(!oh->cache_info.is_dirty);
-
- /* If we're going to free the space on disk, the address must be valid */
- HDassert(!oh->cache_info.free_file_space_on_destroy || H5F_addr_defined(oh->cache_info.addr));
-
- /* Check for releasing file space for object header */
- if(oh->chunk && oh->cache_info.free_file_space_on_destroy) {
- /* Free main (first) object header "chunk" */
- /* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_OHDR, H5AC_dxpl_id, oh->chunk[0].addr, (hsize_t)oh->chunk[0].size) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to free object header")
- } /* end if */
+ HDassert(oh->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC);
+ HDassert(oh->cache_info.type == H5AC_OHDR);
/* Destroy object header */
if(H5O_free(oh) < 0)
@@ -546,170 +610,143 @@ H5O_dest(H5F_t *f, H5O_t *oh)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5O_dest() */
+} /* end H5O__cache_free_icr() */
/*-------------------------------------------------------------------------
- * Function: H5O_clear
+ * Function: H5O__cache_clear
*
- * Purpose: Mark a object header in memory as non-dirty.
+ * Purpose: Clear all dirty bits associated with this cache entry.
*
- * Return: Non-negative on success/Negative on failure
+ * This is necessary as the object header cache client maintains
+ * its own dirty bits on individual messages. These dirty bits
+ * used to be cleared by the old V2 metadata cache flush callback,
+ * but now the metadata cache must clear them explicitly, as
+ * the serialize callback does not imply that the data has been
+ * written to disk.
*
- * Programmer: Quincey Koziol
- * koziol@ncsa.uiuc.edu
- * Mar 20 2003
- *
- * Changes: In the parallel case, there is the possibility that the
- * the object header may be flushed by different processes
- * over the life of the computation. Thus we must ensure
- * that the chunk images are up to date before we mark the
- * messages clean -- as otherwise we may overwrite valid
- * data with a blank section of a chunk image.
+ * This callback is also necessary for the parallel case.
*
- * To deal with this, I have added code to call
- * H5O_chunk_serialize() for all chunks before we
- * mark all messages as clean if we are not destroying the
- * object. Do this in the parallel case only, as the problem
- * can only occur in this context.
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * JRM -- 10/12/10
+ * Programmer: John Mainzer
+ * 9/22/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5O_clear(H5F_t *f, H5O_t *oh, hbool_t destroy)
-{
- unsigned u; /* Local index variable */
- herr_t ret_value = SUCCEED;
+H5O__cache_clear(const H5F_t *f, void *_thing, hbool_t about_to_destroy)
+{
+ H5O_t *oh = (H5O_t *)_thing; /* Object header to reset */
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Check arguments */
HDassert(oh);
+ HDassert(oh->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(oh->cache_info.type == H5AC_OHDR);
#ifdef H5_HAVE_PARALLEL
- if ( ( oh->cache_info.is_dirty ) && ( ! destroy ) ) {
-
- size_t i;
-
- /* scan through all chunks associated with the object header,
- * and cause them to update their images for all entries currently
- * marked dirty. Must do this in the parallel case, as it is possible
- * that this processor may clear this object header several times
- * before flushing it -- thus causing undefined sections of the image
- * to be written to disk overwriting valid data.
+ if((oh->nchunks > 0) && (!about_to_destroy)) {
+ /* Scan through chunk 0 (the chunk stored contiguously with this
+ * object header) and cause it to update its image of all entries
+ * currently marked dirty. Must do this in the parallel case, as
+ * it is possible that this processor may clear this object header
+ * several times before flushing it -- thus causing undefined
+ * sections of the image to be written to disk overwriting valid data.
*/
-
- for ( i = 0; i < oh->nchunks; i++ ) {
-
- if(H5O__chunk_serialize(f, oh, i) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTSERIALIZE, FAIL, "unable to serialize object header chunk")
- }
- }
+ if(H5O__chunk_serialize(f, oh, 0) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTSERIALIZE, FAIL, "unable to serialize object header chunk")
+ } /* end if */
#endif /* H5_HAVE_PARALLEL */
/* Mark messages stored with the object header (i.e. messages in chunk 0) as clean */
for(u = 0; u < oh->nmesgs; u++)
- oh->mesg[u].dirty = FALSE;
+ if(oh->mesg[u].chunkno == 0)
+ oh->mesg[u].dirty = FALSE;
#ifndef NDEBUG
/* Reset the number of messages dirtied by decoding */
oh->ndecode_dirtied = 0;
#endif /* NDEBUG */
- /* Mark whole header as clean */
- oh->cache_info.is_dirty = FALSE;
-
- if(destroy)
- if(H5O_dest(f, oh) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to destroy object header data")
-
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5O_clear() */
+} /* end H5O__cache_clear() */
/*-------------------------------------------------------------------------
- * Function: H5O_size
+ * Function: H5O__cache_chk_get_load_size()
*
- * Purpose: Compute the size in bytes of the specified instance of
- * H5O_t on disk, and return it in *len_ptr. On failure,
- * the value of *len_ptr is undefined.
+ * Purpose: Tell the metadata cache how large the on disk image of the
+ * chunk proxy is, so it can load the image into a buffer for the
+ * deserialize call. In this case, we simply look up the size in
+ * the user data, and return it in *image_len.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: John Mainzer
- * 5/13/04
+ * Programmer: John Mainzer
+ * 7/28/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5O_size(const H5F_t H5_ATTR_UNUSED *f, const H5O_t *oh, size_t *size_ptr)
+H5O__cache_chk_get_load_size(const void *_udata, size_t *image_len)
{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ const H5O_chk_cache_ud_t *udata = (const H5O_chk_cache_ud_t *)_udata; /* User data for callback */
- /* check args */
- HDassert(oh);
- HDassert(size_ptr);
+ FUNC_ENTER_STATIC_NOERR
- /* Report the object header's prefix+first chunk length */
- if(oh->chunk0_size)
- *size_ptr = (size_t)H5O_SIZEOF_HDR(oh) + oh->chunk0_size;
- else
- *size_ptr = oh->chunk[0].size;
+ /* Check arguments */
+ HDassert(udata);
+ HDassert(udata->oh);
+ HDassert(image_len);
+
+ *image_len = udata->size;
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5O_size() */
+} /* end H5O__cache_chk_get_load_size() */
/*-------------------------------------------------------------------------
- * Function: H5O_cache_chk_load
+ * Function: H5O__cache_chk_deserialize
*
- * Purpose: Loads an object header continuation chunk from disk.
+ * Purpose: Attempt to deserialize the object header continuation chunk
+ * contained in the supplied buffer, load the data into an instance
+ * of H5O_chunk_proxy_t, and return a pointer to the new instance.
*
- * Return: Success: Pointer to the new object header chunk proxy.
- * Failure: NULL
+ * Return: Success: Pointer to in core representation
+ * Failure: NULL
*
- * Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Jul 12 2008
+ * Programmer: John Mainzer
+ * 7/28/14
*
*-------------------------------------------------------------------------
*/
-static H5O_chunk_proxy_t *
-H5O_cache_chk_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
+static void *
+H5O__cache_chk_deserialize(const void *image, size_t len, void *_udata,
+ hbool_t *dirty)
{
- H5O_chunk_proxy_t *chk_proxy = NULL; /* Chunk proxy object */
- H5O_chk_cache_ud_t *udata = (H5O_chk_cache_ud_t *)_udata; /* User data for callback */
- H5WB_t *wb = NULL; /* Wrapped buffer for prefix data */
- uint8_t chunk_buf[H5O_SPEC_READ_SIZE]; /* Buffer for speculative read */
- uint8_t *buf; /* Buffer to decode */
- H5O_chunk_proxy_t *ret_value; /* Return value */
+ H5O_chunk_proxy_t *chk_proxy = NULL; /* Chunk proxy object */
+ H5O_chk_cache_ud_t *udata = (H5O_chk_cache_ud_t *)_udata; /* User data for callback */
+ void * ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ HDassert(image);
+ HDassert(len > 0);
HDassert(udata);
HDassert(udata->oh);
+ HDassert(dirty);
/* Allocate space for the object header data structure */
- if(NULL == (chk_proxy = H5FL_CALLOC(H5O_chunk_proxy_t)))
- HGOTO_ERROR(H5E_OHDR, H5E_CANTALLOC, NULL, "memory allocation failed")
-
- /* Wrap the local buffer for serialized header info */
- if(NULL == (wb = H5WB_wrap(chunk_buf, sizeof(chunk_buf))))
- HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, NULL, "can't wrap buffer")
-
- /* Get a pointer to a buffer that's large enough for serialized header */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, udata->size)))
- HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "can't get actual buffer")
-
- /* Read rest of the raw data */
- if(H5F_block_read(f, H5FD_MEM_OHDR, addr, udata->size, dxpl_id, buf) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_READERROR, NULL, "unable to read object header continuation chunk")
+ if(NULL == (chk_proxy = H5FL_CALLOC(H5O_chunk_proxy_t)))
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTALLOC, NULL, "memory allocation failed")
/* Check if we are still decoding the object header */
/* (as opposed to bringing a piece of it back from the file) */
@@ -719,7 +756,7 @@ H5O_cache_chk_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
HDassert(udata->common.cont_msg_info);
/* Parse the chunk */
- if(H5O__chunk_deserialize(udata->oh, udata->common.addr, udata->size, buf, &(udata->common), &chk_proxy->cache_info.is_dirty) < 0)
+ if(H5O__chunk_deserialize(udata->oh, udata->common.addr, udata->size, (const uint8_t *)image, &(udata->common), dirty) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, NULL, "can't deserialize object header chunk")
/* Set the fields for the chunk proxy */
@@ -737,7 +774,7 @@ H5O_cache_chk_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Sanity check that the chunk representation we have in memory is
* the same as the one being brought in from disk.
*/
- HDassert(0 == HDmemcmp(buf, chk_proxy->oh->chunk[chk_proxy->chunkno].image, chk_proxy->oh->chunk[chk_proxy->chunkno].size));
+ HDassert(0 == HDmemcmp(image, chk_proxy->oh->chunk[chk_proxy->chunkno].image, chk_proxy->oh->chunk[chk_proxy->chunkno].size));
} /* end else */
/* Increment reference count of object header */
@@ -748,213 +785,195 @@ H5O_cache_chk_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
ret_value = chk_proxy;
done:
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- HDONE_ERROR(H5E_OHDR, H5E_CLOSEERROR, NULL, "can't close wrapped buffer")
-
- /* Release the [possibly partially initialized] object header on errors */
- if(!ret_value && chk_proxy)
- if(H5O__chunk_proxy_dest(chk_proxy) < 0)
- HDONE_ERROR(H5E_OHDR, H5E_CANTRELEASE, NULL, "unable to destroy object header chunk proxy")
-
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5O_cache_chk_load() */
+} /* end H5O__cache_chk_deserialize() */
/*-------------------------------------------------------------------------
- * Function: H5O_cache_chk_flush
+ * Function: H5O__cache_chk_image_len
*
- * Purpose: Flushes (and destroys) an object header continuation chunk.
+ * Purpose: Return the on disk image size of an object header chunk to the
+ * metadata cache via *image_len.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Jul 12 2008
+ * Programmer: John Mainzer
+ * 7/28/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5O_cache_chk_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
- H5O_chunk_proxy_t *chk_proxy, unsigned H5_ATTR_UNUSED * flags_ptr)
+H5O__cache_chk_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT
+ const H5O_chunk_proxy_t * chk_proxy = (const H5O_chunk_proxy_t *)_thing; /* Chunk proxy to query */
- /* flush */
- if(chk_proxy->cache_info.is_dirty) {
- /* Serialize messages for this chunk */
- if(H5O__chunk_serialize(f, chk_proxy->oh, chk_proxy->chunkno) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTSERIALIZE, FAIL, "unable to serialize object header continuation chunk")
+ FUNC_ENTER_STATIC_NOERR
- /* Write the chunk out */
- HDassert(H5F_addr_defined(chk_proxy->oh->chunk[chk_proxy->chunkno].addr));
- HDassert(H5F_addr_eq(addr, chk_proxy->oh->chunk[chk_proxy->chunkno].addr));
- if(H5F_block_write(f, H5FD_MEM_OHDR, addr, chk_proxy->oh->chunk[chk_proxy->chunkno].size, dxpl_id, chk_proxy->oh->chunk[chk_proxy->chunkno].image) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_WRITEERROR, FAIL, "unable to write object header continuation chunk to disk")
+ /* Check arguments */
+ HDassert(chk_proxy);
+ HDassert(chk_proxy->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(chk_proxy->cache_info.type == H5AC_OHDR_CHK);
+ HDassert(chk_proxy->oh);
+ HDassert(image_len);
- /* Mark object header as clean now */
- chk_proxy->cache_info.is_dirty = FALSE;
- } /* end if */
+ *image_len = chk_proxy->oh->chunk[chk_proxy->chunkno].size;
- /* Destroy the object header, if requested */
- if(destroy)
- if(H5O_cache_chk_dest(f, chk_proxy) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to destroy object header continuation chunk data")
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5O__cache_chk_image_len() */
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5O_cache_chk_flush() */
+/************************************/
+/* no H5O_cache_chk_pre_serialize() */
+/************************************/
/*-------------------------------------------------------------------------
- * Function: H5O_cache_chk_dest
+ * Function: H5O__cache_chk_serialize
*
- * Purpose: Destroys an object header continuation chunk.
+ * Purpose: Given a pointer to an instance of an object header chunk and an
+ * appropriately sized buffer, serialize the contents of the
+ * instance for writing to disk, and copy the serialized data
+ * into the buffer.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * July 12, 2008
+ * Programmer: John Mainzer
+ * 7/28/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5O_cache_chk_dest(H5F_t *f, H5O_chunk_proxy_t *chk_proxy)
+H5O__cache_chk_serialize(const H5F_t *f, void *image, size_t len, void *_thing)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ H5O_chunk_proxy_t * chk_proxy = (H5O_chunk_proxy_t *)_thing; /* Object header chunk to serialize */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Check arguments */
+ HDassert(f);
+ HDassert(image);
HDassert(chk_proxy);
- HDassert(chk_proxy->chunkno > 0);
-
- /* Verify that node is clean */
- HDassert(chk_proxy->cache_info.is_dirty == FALSE);
+ HDassert(chk_proxy->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(chk_proxy->cache_info.type == H5AC_OHDR_CHK);
+ HDassert(chk_proxy->oh);
+ HDassert(chk_proxy->oh->chunk[chk_proxy->chunkno].size == len);
- /* If we're going to free the space on disk, the address must be valid */
- HDassert(!chk_proxy->cache_info.free_file_space_on_destroy || H5F_addr_defined(chk_proxy->cache_info.addr));
-
- /* Check for releasing file space for object header */
- if(chk_proxy->cache_info.free_file_space_on_destroy) {
- /* Release the space on disk */
- /* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_OHDR, H5AC_dxpl_id, chk_proxy->oh->chunk[chk_proxy->chunkno].addr, (hsize_t)chk_proxy->oh->chunk[chk_proxy->chunkno].size) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to free object header continuation chunk")
- } /* end if */
+ /* Serialize messages for this chunk */
+ if(H5O__chunk_serialize(f, chk_proxy->oh, chk_proxy->chunkno) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTSERIALIZE, FAIL, "unable to serialize object header continuation chunk")
- /* Destroy object header chunk proxy */
- if(H5O__chunk_proxy_dest(chk_proxy) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTRELEASE, FAIL, "unable to destroy object header chunk proxy")
+ /* Copy the chunk into the image -- this is potentially expensive.
+ * Can we rework things so that the chunk and the cache share a buffer?
+ */
+ HDmemcpy(image, chk_proxy->oh->chunk[chk_proxy->chunkno].image, len);
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5O_cache_chk_dest() */
+} /* end H5O__cache_chk_serialize() */
+
+/**************************************/
+/* no H5O_cache_chk_notify() function */
+/**************************************/
/*-------------------------------------------------------------------------
- * Function: H5O_cache_chk_clear
- *
- * Purpose: Mark a object header continuation chunk in memory as non-dirty.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * July 12, 2008
+ * Function: H5O__cache_chk_free_icr
*
- * Changes: In the parallel case, there is the possibility that the
- * the object header chunk may be flushed by different
- * processes over the life of the computation. Thus we must
- * ensure that the chunk image is up to date before we mark its
- * messages clean -- as otherwise we may overwrite valid
- * data with a blank section of a chunk image.
+ * Purpose: Free the in core memory associated with the supplied object
+ * header continuation chunk.
*
- * To deal with this, I have added code to call
- * H5O_chunk_serialize() for this chunk before we
- * mark all messages as clean if we are not destroying the
- * chunk.
+ * Note: The metadata cache sets the object's cache_info.magic to
+ * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr
+ * callback (checked in assert).
*
- * Do this in the parallel case only, as the problem
- * can only occur in this context.
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Note that at present at least, it seems that this fix
- * is not necessary, as we don't seem to be able to
- * generate a dirty chunk without creating a dirty object
- * header. However, the object header code will be changing
- * a lot in the near future, so I'll leave this fix in
- * for now, unless Quincey requests otherwise.
- *
- * JRM -- 10/12/10
+ * Programmer: John Mainzer
+ * 7/28/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5O_cache_chk_clear(H5F_t *f, H5O_chunk_proxy_t *chk_proxy, hbool_t destroy)
+H5O__cache_chk_free_icr(void *_thing)
{
- unsigned u; /* Local index variable */
- herr_t ret_value = SUCCEED;
+ H5O_chunk_proxy_t * chk_proxy = (H5O_chunk_proxy_t *)_thing; /* Object header chunk proxy to release */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Check arguments */
HDassert(chk_proxy);
+ HDassert(chk_proxy->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC);
+ HDassert(chk_proxy->cache_info.type == H5AC_OHDR_CHK);
-#ifdef H5_HAVE_PARALLEL
- if((chk_proxy->oh->cache_info.is_dirty) && (!destroy))
- if(H5O__chunk_serialize(f, chk_proxy->oh, chk_proxy->chunkno) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTSERIALIZE, FAIL, "unable to serialize object header chunk")
-#endif /* H5_HAVE_PARALLEL */
-
- /* Mark messages in chunk as clean */
- for(u = 0; u < chk_proxy->oh->nmesgs; u++)
- if(chk_proxy->oh->mesg[u].chunkno == chk_proxy->chunkno)
- chk_proxy->oh->mesg[u].dirty = FALSE;
-
- /* Mark as clean */
- chk_proxy->cache_info.is_dirty = FALSE;
-
- if(destroy)
- if(H5O_cache_chk_dest(f, chk_proxy) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to destroy object header continuation chunk data")
+ /* Destroy object header chunk proxy */
+ if(H5O__chunk_proxy_dest(chk_proxy) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTRELEASE, FAIL, "unable to destroy object header chunk proxy")
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5O_cache_chk_clear() */
+} /* end H5O__cache_chk_free_icr() */
/*-------------------------------------------------------------------------
- * Function: H5O_cache_chk_size
+ * Function: H5O__cache_chk_clear
*
- * Purpose: Compute the size in bytes of the specified instance of
- * an object header continuation chunk on disk, and return it in
- * *len_ptr. On failure, the value of *len_ptr is undefined.
+ * Purpose: Clear all dirty bits associated with this cache entry.
*
- * Return: Non-negative on success/Negative on failure
+ * This is necessary as the object header cache client maintains
+ * its own dirty bits on individual messages. These dirty bits
+ * used to be cleared by the old V2 metadata cache flush callback,
+ * but now the metadata cache must clear them explicitly, as
+ * the serialize callback does not imply that the data has been
+ * written to disk.
*
- * Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * July 12, 2008
+ * This callback is also necessary for the parallel case.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 9/22/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5O_cache_chk_size(const H5F_t H5_ATTR_UNUSED *f, const H5O_chunk_proxy_t *chk_proxy, size_t *size_ptr)
-{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+H5O__cache_chk_clear(const H5F_t *f, void *_thing, hbool_t about_to_destroy)
+{
+ H5O_chunk_proxy_t *chk_proxy = (H5O_chunk_proxy_t *)_thing; /* Object header chunk to reset */
+ H5O_t *oh; /* Object header for chunk */
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
- /* check args */
+ FUNC_ENTER_STATIC
+
+ /* Check arguments */
HDassert(chk_proxy);
- HDassert(size_ptr);
+ HDassert(chk_proxy->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(chk_proxy->cache_info.type == H5AC_OHDR_CHK);
+ oh = chk_proxy->oh;
+ HDassert(oh);
+ HDassert(oh->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(oh->cache_info.type == H5AC_OHDR);
- /* Report the object header continuation chunk's length */
- *size_ptr = chk_proxy->oh->chunk[chk_proxy->chunkno].size;
+#ifdef H5_HAVE_PARALLEL
+ if((chk_proxy->oh->cache_info.is_dirty) && (!about_to_destroy))
+ if(H5O__chunk_serialize(f, chk_proxy->oh, chk_proxy->chunkno) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTSERIALIZE, FAIL, "unable to serialize object header chunk")
+#endif /* H5_HAVE_PARALLEL */
- FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5O_cache_chk_size() */
+ /* Mark messages in chunk as clean */
+ for(u = 0; u < chk_proxy->oh->nmesgs; u++)
+ if(chk_proxy->oh->mesg[u].chunkno == chk_proxy->chunkno)
+ chk_proxy->oh->mesg[u].dirty = FALSE;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O__cache_chk_clear() */
/*-------------------------------------------------------------------------
@@ -1024,7 +1043,7 @@ static herr_t
H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image,
H5O_common_cache_ud_t *udata, hbool_t *dirty)
{
- const uint8_t *p; /* Pointer into buffer to decode */
+ const uint8_t *chunk_image; /* Pointer into buffer to decode */
uint8_t *eom_ptr; /* Pointer to end of messages for a chunk */
size_t curmesg; /* Current message being decoded in object header */
unsigned merged_null_msgs = 0; /* Number of null messages merged together */
@@ -1073,18 +1092,18 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image
HDmemcpy(oh->chunk[chunkno].image, image, oh->chunk[chunkno].size);
/* Point into chunk image to decode */
- p = oh->chunk[chunkno].image;
+ chunk_image = oh->chunk[chunkno].image;
/* Handle chunk 0 as special case */
if(chunkno == 0)
/* Skip over [already decoded] prefix */
- p += (size_t)(H5O_SIZEOF_HDR(oh) - H5O_SIZEOF_CHKSUM_OH(oh));
+ chunk_image += (size_t)(H5O_SIZEOF_HDR(oh) - H5O_SIZEOF_CHKSUM_OH(oh));
/* Check for magic # on chunks > 0 in later versions of the format */
else if(chunkno > 0 && oh->version > H5O_VERSION_1) {
/* Magic number */
- if(HDmemcmp(p, H5O_CHK_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ if(HDmemcmp(chunk_image, H5O_CHK_MAGIC, (size_t)H5_SIZEOF_MAGIC))
HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, FAIL, "wrong object header chunk signature")
- p += H5_SIZEOF_MAGIC;
+ chunk_image += H5_SIZEOF_MAGIC;
} /* end if */
/* Save # of messages already inspected */
@@ -1095,7 +1114,7 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image
#ifndef NDEBUG
nullcnt = 0;
#endif /* NDEBUG */
- while(p < eom_ptr) {
+ while(chunk_image < eom_ptr) {
size_t mesgno; /* Current message to operate on */
size_t mesg_size; /* Size of message read in */
unsigned id; /* ID (type) of current message */
@@ -1106,20 +1125,20 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image
/* Version # */
if(oh->version == H5O_VERSION_1)
- UINT16DECODE(p, id)
+ UINT16DECODE(chunk_image, id)
else
- id = *p++;
+ id = *chunk_image++;
/* Check for unknown message ID getting encoded in file */
if(id == H5O_UNKNOWN_ID)
HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, FAIL, "'unknown' message ID encoded in file?!?")
/* Message size */
- UINT16DECODE(p, mesg_size);
+ UINT16DECODE(chunk_image, mesg_size);
HDassert(mesg_size == H5O_ALIGN_OH(oh, mesg_size));
/* Message flags */
- flags = *p++;
+ flags = *chunk_image++;
if(flags & ~H5O_MSG_FLAG_BITS)
HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, FAIL, "unknown flag for message")
if((flags & H5O_MSG_FLAG_SHARED) && (flags & H5O_MSG_FLAG_DONTSHARE))
@@ -1131,17 +1150,17 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image
/* Reserved bytes/creation index */
if(oh->version == H5O_VERSION_1)
- p += 3; /*reserved*/
+ chunk_image += 3; /*reserved*/
else {
/* Only decode creation index if they are being tracked */
if(oh->flags & H5O_HDR_ATTR_CRT_ORDER_TRACKED)
- UINT16DECODE(p, crt_idx);
+ UINT16DECODE(chunk_image, crt_idx);
} /* end else */
/* Try to detect invalidly formatted object header message that
* extends past end of chunk.
*/
- if(p + mesg_size > eom_ptr)
+ if(chunk_image + mesg_size > eom_ptr)
HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, FAIL, "corrupt object header")
#ifndef NDEBUG
@@ -1177,7 +1196,7 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image
oh->mesg[mesgno].flags = flags;
oh->mesg[mesgno].crt_idx = crt_idx;
oh->mesg[mesgno].native = NULL;
- oh->mesg[mesgno].raw = (uint8_t *)p; /* Casting away const OK - QAK */
+ oh->mesg[mesgno].raw = (uint8_t *)chunk_image; /* Casting away const OK - QAK */
oh->mesg[mesgno].raw_size = mesg_size;
oh->mesg[mesgno].chunkno = chunkno;
@@ -1233,10 +1252,10 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image
} /* end else */
/* Advance decode pointer past message */
- p += mesg_size;
+ chunk_image += mesg_size;
/* Check for 'gap' at end of chunk */
- if((eom_ptr - p) > 0 && (eom_ptr - p) < H5O_SIZEOF_MSGHDR_OH(oh)) {
+ if((eom_ptr - chunk_image) > 0 && (eom_ptr - chunk_image) < H5O_SIZEOF_MSGHDR_OH(oh)) {
/* Gaps can only occur in later versions of the format */
HDassert(oh->version > H5O_VERSION_1);
@@ -1244,10 +1263,10 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image
HDassert(nullcnt == 0);
/* Set gap information for chunk */
- oh->chunk[chunkno].gap = (size_t)(eom_ptr - p);
+ oh->chunk[chunkno].gap = (size_t)(eom_ptr - chunk_image);
/* Increment location in chunk */
- p += oh->chunk[chunkno].gap;
+ chunk_image += oh->chunk[chunkno].gap;
} /* end if */
} /* end while */
@@ -1257,7 +1276,7 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image
uint32_t computed_chksum; /* Checksum computed in memory */
/* Metadata checksum */
- UINT32DECODE(p, stored_chksum);
+ UINT32DECODE(chunk_image, stored_chksum);
/* Compute checksum on chunk */
computed_chksum = H5_checksum_metadata(oh->chunk[chunkno].image, (oh->chunk[chunkno].size - H5O_SIZEOF_CHKSUM), 0);
@@ -1268,7 +1287,7 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image
} /* end if */
/* Sanity check */
- HDassert(p == oh->chunk[chunkno].image + oh->chunk[chunkno].size);
+ HDassert(chunk_image == oh->chunk[chunkno].image + oh->chunk[chunkno].size);
/* Do some inspection/interpretation of new messages from this chunk */
/* (detect continuation messages, ref. count messages, etc.) */
@@ -1390,7 +1409,7 @@ H5O__chunk_serialize(const H5F_t *f, H5O_t *oh, unsigned chunkno)
/* Extra work, for later versions of the format */
if(oh->version > H5O_VERSION_1) {
uint32_t metadata_chksum; /* Computed metadata checksum value */
- uint8_t *p; /* Pointer into object header chunk */
+ uint8_t *chunk_image; /* Pointer into object header chunk */
/* Check for gap in chunk & zero it out */
if(oh->chunk[chunkno].gap)
@@ -1401,8 +1420,8 @@ H5O__chunk_serialize(const H5F_t *f, H5O_t *oh, unsigned chunkno)
metadata_chksum = H5_checksum_metadata(oh->chunk[chunkno].image, (oh->chunk[chunkno].size - H5O_SIZEOF_CHKSUM), 0);
/* Metadata checksum */
- p = oh->chunk[chunkno].image + (oh->chunk[chunkno].size - H5O_SIZEOF_CHKSUM);
- UINT32ENCODE(p, metadata_chksum);
+ chunk_image = oh->chunk[chunkno].image + (oh->chunk[chunkno].size - H5O_SIZEOF_CHKSUM);
+ UINT32ENCODE(chunk_image, metadata_chksum);
} /* end if */
done:
@@ -1443,5 +1462,5 @@ H5O__chunk_proxy_dest(H5O_chunk_proxy_t *chk_proxy)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5O_chunk_proxy_dest() */
+} /* H5O__chunk_proxy_dest() */
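The H5O__chunk_serialize() hunk above keeps the version-2 layout in which the last four bytes of every chunk image hold a little-endian checksum of everything before them. A self-contained sketch of that sealing step, using a stand-in hash rather than HDF5's H5_checksum_metadata():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in checksum (NOT the algorithm HDF5 uses); illustrates the layout only */
static uint32_t toy_checksum(const uint8_t *buf, size_t len)
{
    uint32_t sum = 0;
    size_t i;

    for(i = 0; i < len; i++)
        sum = (sum << 1) ^ buf[i];
    return sum;
}

/* Compute a checksum over image[0 .. size-5] and store it little-endian in the
 * last four bytes, mirroring the chunk-serialize trailer. */
static void seal_chunk_image(uint8_t *image, size_t size)
{
    uint32_t chksum = toy_checksum(image, size - 4);
    uint8_t *p = image + (size - 4);

    *p++ = (uint8_t)(chksum & 0xff);
    *p++ = (uint8_t)((chksum >> 8) & 0xff);
    *p++ = (uint8_t)((chksum >> 16) & 0xff);
    *p   = (uint8_t)((chksum >> 24) & 0xff);
}

int main(void)
{
    uint8_t image[32];

    memset(image, 0xAB, sizeof(image));
    seal_chunk_image(image, sizeof(image));
    printf("checksum bytes: %02x %02x %02x %02x\n",
           image[28], image[29], image[30], image[31]);
    return 0;
}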
diff --git a/src/H5Ochunk.c b/src/H5Ochunk.c
index 41d3f66..bb82ad1 100644
--- a/src/H5Ochunk.c
+++ b/src/H5Ochunk.c
@@ -184,7 +184,7 @@ H5O_chunk_protect(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx)
chk_udata.size = oh->chunk[idx].size;
/* Get the chunk proxy */
- if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, &chk_udata, H5AC_WRITE)))
+ if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, &chk_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to load object header chunk")
/* Sanity check */
@@ -337,7 +337,7 @@ H5O_chunk_update_idx(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx)
chk_udata.size = oh->chunk[idx].size;
/* Get the chunk proxy */
- if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, &chk_udata, H5AC_WRITE)))
+ if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, &chk_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk")
/* Update index for chunk proxy in cache */
@@ -389,7 +389,7 @@ H5O_chunk_delete(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx)
chk_udata.size = oh->chunk[idx].size;
/* Get the chunk proxy */
- if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, &chk_udata, H5AC_WRITE)))
+ if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, &chk_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk")
/* Sanity check */
diff --git a/src/H5Ocopy.c b/src/H5Ocopy.c
index afd8e94..66322da 100644
--- a/src/H5Ocopy.c
+++ b/src/H5Ocopy.c
@@ -379,7 +379,7 @@ H5O_copy_header_real(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out*/,
}
/* Get source object header */
- if(NULL == (oh_src = H5O_protect(oloc_src, dxpl_id, H5AC_READ)))
+ if(NULL == (oh_src = H5O_protect(oloc_src, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Retrieve user data for particular type of object to copy */
diff --git a/src/H5Odbg.c b/src/H5Odbg.c
index 0531a90..0388cd5 100644
--- a/src/H5Odbg.c
+++ b/src/H5Odbg.c
@@ -566,7 +566,7 @@ H5O_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int f
loc.addr = addr;
loc.holding_file = FALSE;
- if(NULL == (oh = H5O_protect(&loc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(&loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* debug */
diff --git a/src/H5Oefl.c b/src/H5Oefl.c
index e37e9b7..a7ce31a 100644
--- a/src/H5Oefl.c
+++ b/src/H5Oefl.c
@@ -128,7 +128,7 @@ H5O_efl_decode(H5F_t *f, hid_t dxpl_id, H5O_t H5_ATTR_UNUSED *open_oh,
#ifndef NDEBUG
HDassert(H5F_addr_defined(mesg->heap_addr));
- if(NULL == (heap = H5HL_protect(f, dxpl_id, mesg->heap_addr, H5AC_READ)))
+ if(NULL == (heap = H5HL_protect(f, dxpl_id, mesg->heap_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, NULL, "unable to read protect link value")
s = (const char *)H5HL_offset_into(heap, 0);
@@ -145,7 +145,7 @@ H5O_efl_decode(H5F_t *f, hid_t dxpl_id, H5O_t H5_ATTR_UNUSED *open_oh,
if(NULL == mesg->slot)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
- if(NULL == (heap = H5HL_protect(f, dxpl_id, mesg->heap_addr, H5AC_READ)))
+ if(NULL == (heap = H5HL_protect(f, dxpl_id, mesg->heap_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, NULL, "unable to read protect link value")
for(u = 0; u < mesg->nused; u++) {
/* Name */
@@ -487,7 +487,7 @@ H5O_efl_copy_file(H5F_t H5_ATTR_UNUSED *file_src, void *mesg_src, H5F_t *file_ds
HGOTO_ERROR(H5E_EFL, H5E_CANTINIT, NULL, "can't create heap")
/* Pin the heap down in memory */
- if(NULL == (heap = H5HL_protect(file_dst, dxpl_id, efl_dst->heap_addr, H5AC_WRITE)))
+ if(NULL == (heap = H5HL_protect(file_dst, dxpl_id, efl_dst->heap_addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_EFL, H5E_PROTECT, NULL, "unable to protect EFL file name heap")
/* Insert "empty" name first */
diff --git a/src/H5Omessage.c b/src/H5Omessage.c
index d361194..c2756ea 100644
--- a/src/H5Omessage.c
+++ b/src/H5Omessage.c
@@ -476,7 +476,7 @@ H5O_msg_read(const H5O_loc_t *loc, unsigned type_id, void *mesg,
HDassert(type_id < NELMTS(H5O_msg_class_g));
/* Get the object header */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to protect object header")
/* Call the "real" read routine */
@@ -802,7 +802,7 @@ H5O_msg_count(const H5O_loc_t *loc, unsigned type_id, hid_t dxpl_id)
HDassert(type);
/* Load the object header */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header")
/* Count the messages of the correct type */
@@ -884,7 +884,7 @@ H5O_msg_exists(const H5O_loc_t *loc, unsigned type_id, hid_t dxpl_id)
HDassert(type_id < NELMTS(H5O_msg_class_g));
/* Load the object header */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header")
/* Call the "real" exists routine */
@@ -1224,7 +1224,7 @@ H5O_msg_iterate(const H5O_loc_t *loc, unsigned type_id,
HDassert(op);
/* Protect the object header to iterate over */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header")
/* Call the "real" iterate routine */
@@ -2289,7 +2289,7 @@ H5O_msg_get_chunkno(const H5O_loc_t *loc, unsigned type_id, hid_t dxpl_id)
HDassert(type);
/* Get the object header */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header")
/* Locate message of correct type */
@@ -2344,7 +2344,7 @@ H5O_msg_lock(const H5O_loc_t *loc, unsigned type_id, hid_t dxpl_id)
HDassert(type);
/* Get the object header */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header")
/* Locate message of correct type */
@@ -2402,7 +2402,7 @@ H5O_msg_unlock(const H5O_loc_t *loc, unsigned type_id, hid_t dxpl_id)
HDassert(type);
/* Get the object header */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header")
/* Locate message of correct type */
diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h
index 29f727d..a9a5949 100644
--- a/src/H5Oprivate.h
+++ b/src/H5Oprivate.h
@@ -639,6 +639,8 @@ typedef struct H5O_btreek_t {
* (Data structure in memory)
*/
typedef struct H5O_drvinfo_t {
+/* Information for H5AC cache functions, _must_ be first field in structure */
+ H5AC_info_t cache_info;
char name[9]; /* Driver name */
size_t len; /* Length of encoded buffer */
uint8_t *buf; /* Buffer for encoded info */
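The cache_info field added above follows the usual rule for H5AC clients: the H5AC_info_t must be the first member so the cache can treat a pointer to the client structure and a pointer to its cache bookkeeping interchangeably. A standalone toy of that first-member layout, with the toy_* types invented for illustration:

#include <assert.h>
#include <stddef.h>

/* Stand-in for H5AC_info_t */
typedef struct toy_cache_info {
    int is_dirty;
} toy_cache_info_t;

/* Stand-in for H5O_drvinfo_t; cache_info _must_ be the first field */
typedef struct toy_drvinfo {
    toy_cache_info_t cache_info;
    char             name[9];
    size_t           len;
} toy_drvinfo_t;

int main(void)
{
    toy_drvinfo_t info = {{0}, "family", 6};
    toy_cache_info_t *ci = (toy_cache_info_t *)&info;  /* same address as &info */

    assert(offsetof(toy_drvinfo_t, cache_info) == 0);
    ci->is_dirty = 1;               /* the cache marks the entry dirty through ci */
    return info.cache_info.is_dirty ? 0 : 1;
}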
@@ -734,7 +736,7 @@ H5_DLL herr_t H5O_create(H5F_t *f, hid_t dxpl_id, size_t size_hint,
H5_DLL herr_t H5O_open(H5O_loc_t *loc);
H5_DLL herr_t H5O_close(H5O_loc_t *loc);
H5_DLL int H5O_link(const H5O_loc_t *loc, int adjust, hid_t dxpl_id);
-H5_DLL H5O_t *H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, H5AC_protect_t prot);
+H5_DLL H5O_t *H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, unsigned prot_flags);
H5_DLL H5O_t *H5O_pin(const H5O_loc_t *loc, hid_t dxpl_id);
H5_DLL herr_t H5O_unpin(H5O_t *oh);
H5_DLL herr_t H5O_dec_rc_by_loc(const H5O_loc_t *loc, hid_t dxpl_id);
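The prototype change above, together with the H5AC_READ/H5AC_WRITE substitutions throughout this patch, swaps a two-valued access enum for a flag word: H5AC__NO_FLAGS_SET requests the default read/write protect and H5AC__READ_ONLY_FLAG requests read-only access. A trivial standalone sketch of the idea, with MYAC__* names invented for illustration (not HDF5 API):

#include <stdio.h>

#define MYAC__NO_FLAGS_SET    0x0000u  /* default: read/write protect (was H5AC_WRITE) */
#define MYAC__READ_ONLY_FLAG  0x0001u  /* read-only protect (was H5AC_READ) */

static void protect_entry(unsigned prot_flags)
{
    if (prot_flags & MYAC__READ_ONLY_FLAG)
        printf("protect entry read-only\n");
    else
        printf("protect entry read/write\n");
}

int main(void)
{
    protect_entry(MYAC__NO_FLAGS_SET);    /* callers that previously passed H5AC_WRITE */
    protect_entry(MYAC__READ_ONLY_FLAG);  /* callers that previously passed H5AC_READ */
    return 0;
}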
diff --git a/src/H5Otest.c b/src/H5Otest.c
index d1627ef..e7312ee 100644
--- a/src/H5Otest.c
+++ b/src/H5Otest.c
@@ -108,7 +108,7 @@ H5O_is_attr_dense_test(hid_t oid)
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "object not found")
/* Get the object header */
- if(NULL == (oh = H5O_protect(loc, H5AC_ind_dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, H5AC_ind_dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Check for attribute info stored */
@@ -173,7 +173,7 @@ H5O_is_attr_empty_test(hid_t oid)
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "object not found")
/* Get the object header */
- if(NULL == (oh = H5O_protect(loc, H5AC_ind_dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, H5AC_ind_dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Check for attribute info stored */
@@ -266,7 +266,7 @@ H5O_num_attrs_test(hid_t oid, hsize_t *nattrs)
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "object not found")
/* Get the object header */
- if(NULL == (oh = H5O_protect(loc, H5AC_ind_dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, H5AC_ind_dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Check for attribute info stored */
@@ -361,7 +361,7 @@ H5O_attr_dense_info_test(hid_t oid, hsize_t *name_count, hsize_t *corder_count)
H5_BEGIN_TAG(H5AC_ind_dxpl_id, loc->addr, FAIL);
/* Get the object header */
- if(NULL == (oh = H5O_protect(loc, H5AC_ind_dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, H5AC_ind_dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR_TAG(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Check for attribute info stored */
@@ -452,7 +452,7 @@ H5O_check_msg_marked_test(hid_t oid, hbool_t flag_val)
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "object not found")
/* Get the object header */
- if(NULL == (oh = H5O_protect(loc, H5AC_ind_dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, H5AC_ind_dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Locate "unknown" message */
@@ -511,7 +511,7 @@ H5O_expunge_chunks_test(const H5O_loc_t *loc, hid_t dxpl_id)
FUNC_ENTER_NOAPI(FAIL)
/* Get the object header */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_WRITE)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header")
/* Safety check */
@@ -571,7 +571,7 @@ H5O_get_rc(const H5O_loc_t *loc, hid_t dxpl_id, unsigned *rc)
HDassert(rc);
/* Get the object header */
- if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header")
/* Save the refcount for the object header */
diff --git a/src/H5Plapl.c b/src/H5Plapl.c
index 87b96b6..e3b3c4f 100644
--- a/src/H5Plapl.c
+++ b/src/H5Plapl.c
@@ -237,7 +237,7 @@ H5P_lacc_elink_fapl_enc(const void *value, void **_pp, size_t *size)
uint8_t **pp = (uint8_t **)_pp;
H5P_genplist_t *fapl_plist; /* Pointer to property list */
hbool_t non_default_fapl = FALSE; /* Whether the FAPL is non-default */
- size_t enc_size = 0; /* FAPL's encoded size */
+ size_t fapl_size = 0; /* FAPL's encoded size */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
@@ -257,13 +257,30 @@ H5P_lacc_elink_fapl_enc(const void *value, void **_pp, size_t *size)
/* Encode the property list, if non-default */
/* (if *pp == NULL, will only compute the size) */
if(non_default_fapl) {
- if(H5P__encode(fapl_plist, TRUE, *pp, &enc_size) < 0)
+ if(H5P__encode(fapl_plist, TRUE, NULL, &fapl_size) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTENCODE, FAIL, "can't encode property list")
- if(*pp)
- *pp += enc_size;
+
+ if(*pp) {
+ uint64_t enc_value;
+ unsigned enc_size;
+
+ /* encode the length of the plist */
+ enc_value = (uint64_t)fapl_size;
+ enc_size = H5VM_limit_enc_size(enc_value);
+ HDassert(enc_size < 256);
+ *(*pp)++ = (uint8_t)enc_size;
+ UINT64ENCODE_VAR(*pp, enc_value, enc_size);
+
+ /* encode the plist */
+ if(H5P__encode(fapl_plist, TRUE, *pp, &fapl_size) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTENCODE, FAIL, "can't encode property list")
+
+ *pp += fapl_size;
+ }
+ fapl_size += (1 + H5VM_limit_enc_size((uint64_t)fapl_size));
} /* end if */
- *size += (1 + enc_size); /* Non-default flag, plus encoded property list size */
+ *size += (1 + fapl_size); /* Non-default flag, plus encoded property list size */
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -304,22 +321,21 @@ H5P_lacc_elink_fapl_dec(const void **_pp, void *_value)
non_default_fapl = (hbool_t)*(*pp)++;
if(non_default_fapl) {
- H5P_genplist_t *fapl_plist; /* Pointer to property list */
- size_t enc_size = 0; /* Encoded size of property list */
+ size_t fapl_size = 0; /* Encoded size of property list */
+ unsigned enc_size;
+ uint64_t enc_value;
+
+ /* Decode the plist length */
+ enc_size = *(*pp)++;
+ HDassert(enc_size < 256);
+ UINT64DECODE_VAR(*pp, enc_value, enc_size);
+ fapl_size = (size_t)enc_value;
/* Decode the property list */
if((*elink_fapl = H5P__decode(*pp)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTDECODE, FAIL, "can't decode property")
- /* Get the property list object */
- if(NULL == (fapl_plist = (H5P_genplist_t *)H5P_object_verify(*elink_fapl, H5P_FILE_ACCESS)))
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get property list")
-
- /* Compute the encoded size of the property list */
- if(H5P__encode(fapl_plist, TRUE, NULL, &enc_size) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTENCODE, FAIL, "can't compute encoded property list size")
-
- *pp += enc_size;
+ *pp += fapl_size;
} /* end if */
else
*elink_fapl = H5P_DEFAULT;
@@ -578,7 +594,7 @@ H5P_lacc_elink_pref_dec(const void **_pp, void *_value)
/* Decode the value */
UINT64DECODE_VAR(*pp, enc_value, enc_size);
- len = enc_value;
+ len = (size_t)enc_value;
if(0 != len) {
/* Make a copy of the user's prefix string */
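The H5Plapl.c change above has the encoder write a length prefix ahead of the encoded FAPL, one byte giving the size of the length followed by the length itself, so the decoder can skip the payload without re-encoding the property list just to learn its size. A standalone sketch of that framing, assuming little-endian length bytes; enc_len_size() stands in for H5VM_limit_enc_size() and none of this is HDF5 API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bytes needed to hold v (role of H5VM_limit_enc_size) */
static unsigned enc_len_size(uint64_t v)
{
    unsigned n = 1;
    while (v >>= 8)
        n++;
    return n;
}

/* Write: <1 byte size-of-length> <length, little-endian> <payload> */
static uint8_t *encode_with_len(uint8_t *p, const uint8_t *payload, uint64_t len)
{
    unsigned n = enc_len_size(len);
    unsigned i;

    *p++ = (uint8_t)n;
    for (i = 0; i < n; i++)
        *p++ = (uint8_t)(len >> (8 * i));
    memcpy(p, payload, (size_t)len);
    return p + len;
}

/* Read the prefix back and skip over the payload without decoding it */
static const uint8_t *decode_skip(const uint8_t *p, uint64_t *len_out)
{
    unsigned n = *p++;
    uint64_t len = 0;
    unsigned i;

    for (i = 0; i < n; i++)
        len |= (uint64_t)*p++ << (8 * i);
    *len_out = len;
    return p + len;
}

int main(void)
{
    uint8_t buf[64];
    const uint8_t payload[5] = {1, 2, 3, 4, 5};
    uint64_t len = 0;

    encode_with_len(buf, payload, sizeof(payload));
    decode_skip(buf, &len);
    printf("skipped a %llu-byte payload\n", (unsigned long long)len);
    return 0;
}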
diff --git a/src/H5SM.c b/src/H5SM.c
index abdb109..1d391f1 100644
--- a/src/H5SM.c
+++ b/src/H5SM.c
@@ -358,7 +358,7 @@ H5SM_type_shared(H5F_t *f, unsigned type_id, hid_t dxpl_id)
/* Set up user data for callback */
cache_udata.f = f;
- if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC_READ)))
+ if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
} /* end if */
else
@@ -412,7 +412,7 @@ H5SM_get_fheap_addr(H5F_t *f, hid_t dxpl_id, unsigned type_id, haddr_t *fheap_ad
cache_udata.f = f;
/* Look up the master SOHM table */
- if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC_READ)))
+ if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
/* Look up index for message type */
@@ -831,7 +831,7 @@ H5SM_convert_btree_to_list(H5F_t * f, H5SM_index_header_t * header, hid_t dxpl_i
cache_udata.header = header;
/* Protect the SOHM list */
- if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, &cache_udata, H5AC_WRITE)))
+ if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM list index")
/* Delete the B-tree and have messages copy themselves to the
@@ -939,7 +939,7 @@ H5SM_can_share(H5F_t *f, hid_t dxpl_id, H5SM_master_table_t *table,
/* Set up user data for callback */
cache_udata.f = f;
- if(NULL == (my_table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC_READ)))
+ if(NULL == (my_table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
} /* end if */
@@ -1071,7 +1071,7 @@ H5SM_try_share(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh, unsigned defer_flags,
cache_udata.f = f;
/* Look up the master SOHM table */
- if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC_WRITE)))
+ if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
/* "complex" sharing checks */
@@ -1277,7 +1277,7 @@ H5SM_write_mesg(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh,
cache_udata.header = header;
/* The index is a list; get it from the cache */
- if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, &cache_udata, defer ? H5AC_READ : H5AC_WRITE)))
+ if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, &cache_udata, defer ? H5AC__READ_ONLY_FLAG : H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM index")
/* See if the message is already in the index and get its location.
@@ -1530,7 +1530,7 @@ H5SM_delete(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh, H5O_shared_t *sh_mesg)
cache_udata.f = f;
/* Look up the master SOHM table */
- if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC_WRITE)))
+ if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
/* Find the correct index and try to delete from it */
@@ -1799,7 +1799,7 @@ H5SM_delete_from_index(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh,
cache_udata.header = header;
/* If the index is stored as a list, get it from the cache */
- if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, &cache_udata, H5AC_WRITE)))
+ if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM index")
/* Find the message in the list */
@@ -1970,7 +1970,7 @@ H5SM_get_info(const H5O_loc_t *ext_loc, H5P_genplist_t *fc_plist, hid_t dxpl_id)
cache_udata.f = f;
/* Read the rest of the SOHM table information from the cache */
- if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC_READ)))
+ if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
/* Get index conversion limits */
@@ -2133,7 +2133,7 @@ H5SM_get_refcount(H5F_t *f, hid_t dxpl_id, unsigned type_id,
tbl_cache_udata.f = f;
/* Look up the master SOHM table */
- if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &tbl_cache_udata, H5AC_READ)))
+ if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &tbl_cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
/* Find the correct index and find the message in it */
@@ -2172,7 +2172,7 @@ H5SM_get_refcount(H5F_t *f, hid_t dxpl_id, unsigned type_id,
lst_cache_udata.header = header;
/* If the index is stored as a list, get it from the cache */
- if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, &lst_cache_udata, H5AC_READ)))
+ if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, &lst_cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM index")
/* Find the message in the list */
@@ -2375,7 +2375,7 @@ H5SM_read_mesg(H5F_t *f, const H5SM_sohm_t *mesg, H5HF_t *fheap,
HGOTO_ERROR(H5E_SOHM, H5E_CANTLOAD, FAIL, "unable to open object header")
/* Load the object header from the cache */
- if(NULL == (oh = H5O_protect(&oloc, dxpl_id, H5AC_READ)))
+ if(NULL == (oh = H5O_protect(&oloc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load object header")
} /* end if */
else
@@ -2530,7 +2530,7 @@ H5SM_table_debug(H5F_t *f, hid_t dxpl_id, haddr_t table_addr,
cache_udata.f = f;
/* Look up the master SOHM table */
- if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, table_addr, &cache_udata, H5AC_READ)))
+ if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, table_addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
HDfprintf(stream, "%*sShared Message Master Table...\n", indent, "");
@@ -2616,7 +2616,7 @@ H5SM_list_debug(H5F_t *f, hid_t dxpl_id, haddr_t list_addr,
cache_udata.header = &header;
/* Get the list from the cache */
- if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, list_addr, &cache_udata, H5AC_READ)))
+ if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, list_addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM index")
HDfprintf(stream, "%*sShared Message List Index...\n", indent, "");
@@ -2693,7 +2693,7 @@ H5SM_ih_size(H5F_t *f, hid_t dxpl_id, hsize_t *hdr_size, H5_ih_info_t *ih_info)
cache_udata.f = f;
/* Look up the master SOHM table */
- if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC_READ)))
+ if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
/* Get SOHM header size */
diff --git a/src/H5SMcache.c b/src/H5SMcache.c
index eaeb889..d8c0824 100644
--- a/src/H5SMcache.c
+++ b/src/H5SMcache.c
@@ -47,12 +47,6 @@
/* Local Macros */
/****************/
-/* Size of stack buffer for serialized tables */
-#define H5SM_TBL_BUF_SIZE 1024
-
-/* Size of stack buffer for serialized list indices */
-#define H5SM_LST_BUF_SIZE 1024
-
/******************/
/* Local Typedefs */
@@ -64,16 +58,23 @@
/********************/
/* Metadata cache (H5AC) callbacks */
-static H5SM_master_table_t *H5SM_table_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5SM_table_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5SM_master_table_t *table);
-static herr_t H5SM_table_dest(H5F_t *f, H5SM_master_table_t* table);
-static herr_t H5SM_table_clear(H5F_t *f, H5SM_master_table_t *table, hbool_t destroy);
-static herr_t H5SM_table_size(const H5F_t *f, const H5SM_master_table_t *table, size_t *size_ptr);
-static H5SM_list_t *H5SM_list_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
-static herr_t H5SM_list_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5SM_list_t *list);
-static herr_t H5SM_list_dest(H5F_t *f, H5SM_list_t* list);
-static herr_t H5SM_list_clear(H5F_t *f, H5SM_list_t *list, hbool_t destroy);
-static herr_t H5SM_list_size(const H5F_t *f, const H5SM_list_t H5_ATTR_UNUSED *list, size_t *size_ptr);
+static herr_t H5SM__cache_table_get_load_size(const void *udata, size_t *image_len);
+static void *H5SM__cache_table_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5SM__cache_table_image_len(const void *thing, size_t *image_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static herr_t H5SM__cache_table_serialize(const H5F_t *f, void *image,
+ size_t len, void *thing);
+static herr_t H5SM__cache_table_free_icr(void *thing);
+
+static herr_t H5SM__cache_list_get_load_size(const void *udata, size_t *image_len);
+static void *H5SM__cache_list_deserialize(const void *image, size_t len,
+ void *udata, hbool_t *dirty);
+static herr_t H5SM__cache_list_image_len(const void *thing, size_t *image_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static herr_t H5SM__cache_list_serialize(const H5F_t *f, void *image,
+ size_t len, void *thing);
+static herr_t H5SM__cache_list_free_icr(void *thing);
/*********************/
@@ -82,23 +83,35 @@ static herr_t H5SM_list_size(const H5F_t *f, const H5SM_list_t H5_ATTR_UNUSED *l
/* H5SM inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_SOHM_TABLE[1] = {{
- H5AC_SOHM_TABLE_ID,
- (H5AC_load_func_t)H5SM_table_load,
- (H5AC_flush_func_t)H5SM_table_flush,
- (H5AC_dest_func_t)H5SM_table_dest,
- (H5AC_clear_func_t)H5SM_table_clear,
- (H5AC_notify_func_t)NULL,
- (H5AC_size_func_t)H5SM_table_size,
+ H5AC_SOHM_TABLE_ID, /* Metadata client ID */
+ "shared message table", /* Metadata client name (for debugging) */
+ H5FD_MEM_SOHM_TABLE, /* File space memory type for client */
+ H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
+ H5SM__cache_table_get_load_size, /* 'get_load_size' callback */
+ H5SM__cache_table_deserialize, /* 'deserialize' callback */
+ H5SM__cache_table_image_len, /* 'image_len' callback */
+ NULL, /* 'pre_serialize' callback */
+ H5SM__cache_table_serialize, /* 'serialize' callback */
+ NULL, /* 'notify' callback */
+ H5SM__cache_table_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
const H5AC_class_t H5AC_SOHM_LIST[1] = {{
- H5AC_SOHM_LIST_ID,
- (H5AC_load_func_t)H5SM_list_load,
- (H5AC_flush_func_t)H5SM_list_flush,
- (H5AC_dest_func_t)H5SM_list_dest,
- (H5AC_clear_func_t)H5SM_list_clear,
- (H5AC_notify_func_t)NULL,
- (H5AC_size_func_t)H5SM_list_size,
+ H5AC_SOHM_LIST_ID, /* Metadata client ID */
+ "shared message list", /* Metadata client name (for debugging) */
+ H5FD_MEM_SOHM_TABLE, /* File space memory type for client */
+ H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
+ H5SM__cache_list_get_load_size, /* 'get_load_size' callback */
+ H5SM__cache_list_deserialize, /* 'deserialize' callback */
+ H5SM__cache_list_image_len, /* 'image_len' callback */
+ NULL, /* 'pre_serialize' callback */
+ H5SM__cache_list_serialize, /* 'serialize' callback */
+ NULL, /* 'notify' callback */
+ H5SM__cache_list_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
}};
@@ -114,32 +127,76 @@ const H5AC_class_t H5AC_SOHM_LIST[1] = {{
/*-------------------------------------------------------------------------
- * Function: H5SM_table_load
+ * Function: H5SM__cache_table_get_load_size()
+ *
+ * Purpose: Return the size of the master table of Shared Object Header
+ * Message indexes on disk. As this cache client doesn't use
+ * speculative reads, this value should be accurate.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 7/28/14
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5SM__cache_table_get_load_size(const void *_udata, size_t *image_len)
+{
+ const H5SM_table_cache_ud_t *udata = (const H5SM_table_cache_ud_t *)_udata; /* User data for callback */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check arguments */
+ HDassert(udata);
+ HDassert(udata->f);
+ HDassert(image_len);
+
+ *image_len = H5SM_TABLE_SIZE(udata->f);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5SM__cache_table_get_load_size() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5SM__cache_table_deserialize
*
- * Purpose: Loads the master table of Shared Object Header Message
- * indexes.
+ * Purpose: Given a buffer containing the on disk representation of the
+ * master table of Shared Object Header Message indexes, deserialize
+ * the table, copy the contents into a newly allocated instance of
+ * H5SM_master_table_t, and return a pointer to the new instance.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: Pointer to in core representation
+ * Failure: NULL
*
- * Programmer: James Laird
- * November 6, 2006
+ * Programmer: John Mainzer
+ * 7/28/14
*
*-------------------------------------------------------------------------
*/
-static H5SM_master_table_t *
-H5SM_table_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void H5_ATTR_UNUSED *udata)
+static void *
+H5SM__cache_table_deserialize(const void *_image, size_t len, void *_udata,
+ hbool_t H5_ATTR_UNUSED *dirty)
{
- H5SM_master_table_t *table = NULL;
- H5WB_t *wb = NULL; /* Wrapped buffer for table data */
- uint8_t tbl_buf[H5SM_TBL_BUF_SIZE]; /* Buffer for table */
- uint8_t *buf; /* Reading buffer */
- const uint8_t *p; /* Pointer into input buffer */
- uint32_t stored_chksum; /* Stored metadata checksum value */
- uint32_t computed_chksum; /* Computed metadata checksum value */
- size_t u; /* Counter variable for index headers */
- H5SM_master_table_t *ret_value;
-
- FUNC_ENTER_NOAPI_NOINIT
+ H5F_t *f; /* File pointer -- from user data */
+ H5SM_master_table_t *table = NULL; /* Shared message table being deserialized */
+ H5SM_table_cache_ud_t *udata = (H5SM_table_cache_ud_t *)_udata; /* Pointer to user data */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into input buffer */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+ size_t u; /* Counter variable for index headers */
+ void * ret_value; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Check arguments */
+ HDassert(image);
+ HDassert(len > 0);
+ HDassert(udata);
+ HDassert(udata->f);
+ f = udata->f;
+ HDassert(dirty);
/* Verify that we're reading version 0 of the table; this is the only
* version defined so far.
@@ -152,35 +209,18 @@ H5SM_table_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void H5_ATTR_UNUSED *udat
/* Read number of indexes and version from file superblock */
table->num_indexes = H5F_SOHM_NINDEXES(f);
-
- HDassert(addr == H5F_SOHM_ADDR(f));
- HDassert(addr != HADDR_UNDEF);
HDassert(table->num_indexes > 0);
- /* Wrap the local buffer for serialized table info */
- if(NULL == (wb = H5WB_wrap(tbl_buf, sizeof(tbl_buf))))
- HGOTO_ERROR(H5E_SOHM, H5E_CANTINIT, NULL, "can't wrap buffer")
-
/* Compute the size of the SOHM table header on disk. This is the "table"
* itself plus each index within the table
*/
table->table_size = H5SM_TABLE_SIZE(f);
-
- /* Get a pointer to a buffer that's large enough for serialized table */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, table->table_size)))
- HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, NULL, "can't get actual buffer")
-
- /* Read header from disk */
- if(H5F_block_read(f, H5FD_MEM_SOHM_TABLE, addr, table->table_size, dxpl_id, buf) < 0)
- HGOTO_ERROR(H5E_SOHM, H5E_READERROR, NULL, "can't read SOHM table")
-
- /* Get temporary pointer to serialized table */
- p = buf;
+ HDassert(table->table_size == len);
/* Check magic number */
- if(HDmemcmp(p, H5SM_TABLE_MAGIC, (size_t)H5_SIZEOF_MAGIC))
- HGOTO_ERROR(H5E_SOHM, H5E_CANTLOAD, NULL, "bad SOHM table signature")
- p += H5_SIZEOF_MAGIC;
+ if(HDmemcmp(image, H5SM_TABLE_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ HGOTO_ERROR(H5E_SOHM, H5E_CANTLOAD, NULL, "bad SOHM table signature")
+ image += H5_SIZEOF_MAGIC;
/* Allocate space for the index headers in memory*/
if(NULL == (table->indexes = (H5SM_index_header_t *)H5FL_ARR_MALLOC(H5SM_index_header_t, (size_t)table->num_indexes)))
@@ -189,45 +229,45 @@ H5SM_table_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void H5_ATTR_UNUSED *udat
/* Read in the index headers */
for(u = 0; u < table->num_indexes; ++u) {
/* Verify correct version of index list */
- if(H5SM_LIST_VERSION != *p++)
+ if(H5SM_LIST_VERSION != *image++)
HGOTO_ERROR(H5E_SOHM, H5E_VERSION, NULL, "bad shared message list version number")
/* Type of the index (list or B-tree) */
- table->indexes[u].index_type= (H5SM_index_type_t)*p++;
+ table->indexes[u].index_type = (H5SM_index_type_t)*image++;
/* Type of messages in the index */
- UINT16DECODE(p, table->indexes[u].mesg_types);
+ UINT16DECODE(image, table->indexes[u].mesg_types);
/* Minimum size of message to share */
- UINT32DECODE(p, table->indexes[u].min_mesg_size);
+ UINT32DECODE(image, table->indexes[u].min_mesg_size);
/* List cutoff; fewer than this number and index becomes a list */
- UINT16DECODE(p, table->indexes[u].list_max);
+ UINT16DECODE(image, table->indexes[u].list_max);
/* B-tree cutoff; more than this number and index becomes a B-tree */
- UINT16DECODE(p, table->indexes[u].btree_min);
+ UINT16DECODE(image, table->indexes[u].btree_min);
/* Number of messages shared */
- UINT16DECODE(p, table->indexes[u].num_messages);
+ UINT16DECODE(image, table->indexes[u].num_messages);
/* Address of the actual index */
- H5F_addr_decode(f, &p, &(table->indexes[u].index_addr));
+ H5F_addr_decode(f, &image, &(table->indexes[u].index_addr));
/* Address of the index's heap */
- H5F_addr_decode(f, &p, &(table->indexes[u].heap_addr));
+ H5F_addr_decode(f, &image, &(table->indexes[u].heap_addr));
/* Compute the size of a list index for this SOHM index */
table->indexes[u].list_size = H5SM_LIST_SIZE(f, table->indexes[u].list_max);
} /* end for */
/* Read in checksum */
- UINT32DECODE(p, stored_chksum);
+ UINT32DECODE(image, stored_chksum);
/* Sanity check */
- HDassert((size_t)(p - (const uint8_t *)buf) == table->table_size);
+ HDassert((size_t)(image - (const uint8_t *)_image) == table->table_size);
/* Compute checksum on entire header */
- computed_chksum = H5_checksum_metadata(buf, (table->table_size - H5SM_SIZEOF_CHECKSUM), 0);
+ computed_chksum = H5_checksum_metadata(_image, (table->table_size - H5SM_SIZEOF_CHECKSUM), 0);
/* Verify checksum */
if(stored_chksum != computed_chksum)
@@ -237,147 +277,169 @@ H5SM_table_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void H5_ATTR_UNUSED *udat
ret_value = table;
done:
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- HDONE_ERROR(H5E_SOHM, H5E_CLOSEERROR, NULL, "can't close wrapped buffer")
if(!ret_value && table)
if(H5SM_table_free(table) < 0)
HDONE_ERROR(H5E_SOHM, H5E_CANTFREE, NULL, "unable to destroy sohm table")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5SM_table_load() */
+} /* end H5SM__cache_table_deserialize() */
/*-------------------------------------------------------------------------
- * Function: H5SM_table_flush
+ * Function: H5SM__cache_table_image_len
*
- * Purpose: Flushes (and destroys) the table of Shared Object Header
- * Message indexes.
+ * Purpose: Compute the size in bytes of the specified instance of
+ * H5SM_master_table_t on disk, and return it in *image_len.
+ * On failure, the value of *image_len is undefined.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: James Laird
- * November 6, 2006
+ * Programmer: John Mainzer
+ * 7/28/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5SM_table_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5SM_master_table_t *table)
+H5SM__cache_table_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- H5WB_t *wb = NULL; /* Wrapped buffer for table data */
- uint8_t tbl_buf[H5SM_TBL_BUF_SIZE]; /* Buffer for table */
- herr_t ret_value = SUCCEED; /* Return value */
+ const H5SM_master_table_t *table = (const H5SM_master_table_t *)_thing; /* Shared message table to query */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC_NOERR
- /* check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ /* Check arguments */
HDassert(table);
+ HDassert(table->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(table->cache_info.type == H5AC_SOHM_TABLE);
+ HDassert(image_len);
- if(table->cache_info.is_dirty) {
- uint8_t *buf; /* Temporary buffer */
- uint8_t *p; /* Pointer into raw data buffer */
- uint32_t computed_chksum; /* Computed metadata checksum value */
- size_t u; /* Counter variable */
+ *image_len = table->table_size;
- /* Verify that we're writing version 0 of the table; this is the only
- * version defined so far.
- */
- HDassert(H5F_SOHM_VERS(f) == HDF5_SHAREDHEADER_VERSION);
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5SM__cache_table_image_len() */
- /* Wrap the local buffer for serialized header info */
- if(NULL == (wb = H5WB_wrap(tbl_buf, sizeof(tbl_buf))))
- HGOTO_ERROR(H5E_SOHM, H5E_CANTINIT, FAIL, "can't wrap buffer")
+/***************************************/
+/* no H5SM_cache_table_pre_serialize() */
+/***************************************/
- /* Get a pointer to a buffer that's large enough for serialized table */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, table->table_size)))
- HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, FAIL, "can't get actual buffer")
+
+/*-------------------------------------------------------------------------
+ * Function: H5SM__cache_table_serialize
+ *
+ * Purpose: Serialize the contents of the supplied shared message table, and
+ * load this data into the supplied buffer.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 7/28/14
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5SM__cache_table_serialize(const H5F_t *f, void *_image, size_t len,
+ void *_thing)
+{
+ H5SM_master_table_t *table = (H5SM_master_table_t *)_thing; /* Shared message table to encode */
+ uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+ size_t u; /* Counter variable */
- /* Get temporary pointer to buffer for serialized table */
- p = buf;
+ FUNC_ENTER_STATIC_NOERR
- /* Encode magic number */
- HDmemcpy(p, H5SM_TABLE_MAGIC, (size_t)H5_SIZEOF_MAGIC);
- p += H5_SIZEOF_MAGIC;
+ /* Check arguments */
+ HDassert(f);
+ HDassert(image);
+ HDassert(table);
+ HDassert(table->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(table->cache_info.type == H5AC_SOHM_TABLE);
+ HDassert(table->table_size == len);
- /* Encode each index header */
- for(u = 0; u < table->num_indexes; ++u) {
- /* Version for this list */
- *p++ = H5SM_LIST_VERSION;
+ /* Verify that we're writing version 0 of the table; this is the only
+ * version defined so far.
+ */
+ HDassert(H5F_SOHM_VERS(f) == HDF5_SHAREDHEADER_VERSION);
- /* Is message index a list or a B-tree? */
- *p++ = table->indexes[u].index_type;
+ /* Encode magic number */
+ HDmemcpy(image, H5SM_TABLE_MAGIC, (size_t)H5_SIZEOF_MAGIC);
+ image += H5_SIZEOF_MAGIC;
- /* Type of messages in the index */
- UINT16ENCODE(p, table->indexes[u].mesg_types);
+ /* Encode each index header */
+ for(u = 0; u < table->num_indexes; ++u) {
+ /* Version for this list */
+ *image++ = H5SM_LIST_VERSION;
- /* Minimum size of message to share */
- UINT32ENCODE(p, table->indexes[u].min_mesg_size);
+ /* Is message index a list or a B-tree? */
+ *image++ = table->indexes[u].index_type;
- /* List cutoff; fewer than this number and index becomes a list */
- UINT16ENCODE(p, table->indexes[u].list_max);
+ /* Type of messages in the index */
+ UINT16ENCODE(image, table->indexes[u].mesg_types);
- /* B-tree cutoff; more than this number and index becomes a B-tree */
- UINT16ENCODE(p, table->indexes[u].btree_min);
+ /* Minimum size of message to share */
+ UINT32ENCODE(image, table->indexes[u].min_mesg_size);
- /* Number of messages shared */
- UINT16ENCODE(p, table->indexes[u].num_messages);
+ /* List cutoff; fewer than this number and index becomes a list */
+ UINT16ENCODE(image, table->indexes[u].list_max);
- /* Address of the actual index */
- H5F_addr_encode(f, &p, table->indexes[u].index_addr);
+ /* B-tree cutoff; more than this number and index becomes a B-tree */
+ UINT16ENCODE(image, table->indexes[u].btree_min);
- /* Address of the index's heap */
- H5F_addr_encode(f, &p, table->indexes[u].heap_addr);
- } /* end for */
+ /* Number of messages shared */
+ UINT16ENCODE(image, table->indexes[u].num_messages);
- /* Compute checksum on buffer */
- computed_chksum = H5_checksum_metadata(buf, (table->table_size - H5SM_SIZEOF_CHECKSUM), 0);
- UINT32ENCODE(p, computed_chksum);
+ /* Address of the actual index */
+ H5F_addr_encode(f, &image, table->indexes[u].index_addr);
- /* Write the table to disk */
- HDassert((size_t)(p - buf) == table->table_size);
- if(H5F_block_write(f, H5FD_MEM_SOHM_TABLE, addr, table->table_size, dxpl_id, buf) < 0)
- HGOTO_ERROR(H5E_SOHM, H5E_CANTFLUSH, FAIL, "unable to save sohm table to disk")
+ /* Address of the index's heap */
+ H5F_addr_encode(f, &image, table->indexes[u].heap_addr);
+ } /* end for */
- table->cache_info.is_dirty = FALSE;
- } /* end if */
+ /* Compute checksum on buffer */
+ computed_chksum = H5_checksum_metadata(_image, (table->table_size - H5SM_SIZEOF_CHECKSUM), 0);
+ UINT32ENCODE(image, computed_chksum);
- if(destroy)
- if(H5SM_table_dest(f, table) < 0)
- HGOTO_ERROR(H5E_SOHM, H5E_CANTFREE, FAIL, "unable to destroy sohm table")
+ /* sanity check */
+ HDassert((size_t)(image - ((uint8_t *)_image)) == table->table_size);
-done:
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- HDONE_ERROR(H5E_SOHM, H5E_CLOSEERROR, FAIL, "can't close wrapped buffer")
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5SM__cache_table_serialize() */
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5SM_table_flush() */
+/*****************************************/
+/* no H5SM_cache_table_notify() function */
+/*****************************************/
/*-------------------------------------------------------------------------
- * Function: H5SM_table_dest
+ * Function: H5SM__cache_table_free_icr
*
- * Purpose: Frees memory used by the SOHM table.
+ * Purpose: Free memory used by the SOHM table.
*
- * Return: Non-negative on success/Negative on failure
+ * Note: The metadata cache sets the object's cache_info.magic to
+ * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr
+ * callback (checked in assert).
*
- * Programmer: James Laird
- * November 6, 2006
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 7/28/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5SM_table_dest(H5F_t H5_ATTR_UNUSED *f, H5SM_master_table_t* table)
+H5SM__cache_table_free_icr(void *_thing)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ H5SM_master_table_t *table = (H5SM_master_table_t *)_thing; /* Shared message table to release */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Check arguments */
HDassert(table);
- HDassert(table->indexes);
+ HDassert(table->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC);
+ HDassert(table->cache_info.type == H5AC_SOHM_TABLE);
/* Destroy Shared Object Header Message table */
if(H5SM_table_free(table) < 0)
@@ -385,105 +447,80 @@ H5SM_table_dest(H5F_t H5_ATTR_UNUSED *f, H5SM_master_table_t* table)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5SM_table_dest() */
+} /* end H5SM__cache_table_free_icr() */
/*-------------------------------------------------------------------------
- * Function: H5SM_table_clear
+ * Function: H5SM__cache_list_get_load_size()
*
- * Purpose: Mark this table as no longer being dirty.
+ * Purpose: Return the on disk size of a list of SOHM messages. In this case,
+ * we simply look up the size in the user data, and return that value
+ * in *image_len.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: James Laird
- * November 6, 2006
+ * Programmer: John Mainzer
+ * 7/28/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5SM_table_clear(H5F_t *f, H5SM_master_table_t *table, hbool_t destroy)
+H5SM__cache_list_get_load_size(const void *_udata, size_t *image_len)
{
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT
-
- /*
- * Check arguments.
- */
- HDassert(table);
-
- /* Reset the dirty flag. */
- table->cache_info.is_dirty = FALSE;
-
- if(destroy)
- if(H5SM_table_dest(f, table) < 0)
- HGOTO_ERROR(H5E_SOHM, H5E_CANTFREE, FAIL, "unable to delete SOHM master table")
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5SM_table_clear() */
+ const H5SM_list_cache_ud_t *udata = (const H5SM_list_cache_ud_t *)_udata; /* User data for callback */
-
-/*-------------------------------------------------------------------------
- * Function: H5SM_table_size
- *
- * Purpose: Returns the size of the table encoded on disk.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: James Laird
- * November 6, 2006
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5SM_table_size(const H5F_t H5_ATTR_UNUSED *f, const H5SM_master_table_t *table, size_t *size_ptr)
-{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_NOERR
/* Check arguments */
- HDassert(f);
- HDassert(table);
- HDassert(size_ptr);
+ HDassert(udata);
+ HDassert(udata->header);
+ HDassert(udata->header->list_size > 0);
+ HDassert(image_len);
- /* Set size value */
- *size_ptr = table->table_size;
+ *image_len = udata->header->list_size;
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* end H5SM_table_size() */
+} /* end H5SM__cache_list_get_load_size() */
/*-------------------------------------------------------------------------
- * Function: H5SM_list_load
+ * Function: H5SM__cache_list_deserialize
*
- * Purpose: Loads a list of SOHM messages.
+ * Purpose: Given a buffer containing the on disk image of a list of
+ * SOHM messages, deserialize the list, load it into a newly allocated
+ * instance of H5SM_list_t, and return a pointer to same.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: Pointer to in core representation
+ * Failure: NULL
*
- * Programmer: James Laird
- * November 6, 2006
+ * Programmer: John Mainzer
+ * 7/28/14
*
*-------------------------------------------------------------------------
*/
-static H5SM_list_t *
-H5SM_list_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
+static void *
+H5SM__cache_list_deserialize(const void *_image, size_t len, void *_udata,
+ hbool_t H5_ATTR_UNUSED *dirty)
{
- H5SM_list_t *list; /* The SOHM list being read in */
- H5SM_list_cache_ud_t *udata = (H5SM_list_cache_ud_t *)_udata; /* User data for callback */
- H5SM_bt2_ctx_t ctx; /* Message encoding context */
- H5WB_t *wb = NULL; /* Wrapped buffer for list index data */
- uint8_t lst_buf[H5SM_LST_BUF_SIZE]; /* Buffer for list index */
- uint8_t *buf; /* Reading buffer */
- uint8_t *p; /* Pointer into input buffer */
- uint32_t stored_chksum; /* Stored metadata checksum value */
- uint32_t computed_chksum; /* Computed metadata checksum value */
- size_t u; /* Counter variable for messages in list */
- H5SM_list_t *ret_value; /* Return value */
+ H5SM_list_t *list = NULL; /* The SOHM list being read in */
+ H5SM_list_cache_ud_t *udata = (H5SM_list_cache_ud_t *)_udata; /* User data for callback */
+ H5SM_bt2_ctx_t ctx; /* Message encoding context */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into input buffer */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+ size_t u; /* Counter variable for messages in list */
+ void * ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Check arguments */
+ HDassert(image);
+ HDassert(len > 0);
+ HDassert(udata);
HDassert(udata->header);
+ HDassert(udata->header->list_size == len);
+ HDassert(dirty);
/* Allocate space for the SOHM list data structure */
if(NULL == (list = H5FL_MALLOC(H5SM_list_t)))
@@ -495,42 +532,28 @@ H5SM_list_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, NULL, "file allocation failed for SOHM list")
list->header = udata->header;
- /* Wrap the local buffer for serialized list index info */
- if(NULL == (wb = H5WB_wrap(lst_buf, sizeof(lst_buf))))
- HGOTO_ERROR(H5E_SOHM, H5E_CANTINIT, NULL, "can't wrap buffer")
-
- /* Get a pointer to a buffer that's large enough for serialized list index */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, udata->header->list_size)))
- HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, NULL, "can't get actual buffer")
-
- /* Read list from disk */
- if(H5F_block_read(f, H5FD_MEM_SOHM_INDEX, addr, udata->header->list_size, dxpl_id, buf) < 0)
- HGOTO_ERROR(H5E_SOHM, H5E_READERROR, NULL, "can't read SOHM list")
-
- /* Get temporary pointer to serialized list index */
- p = buf;
-
/* Check magic number */
- if(HDmemcmp(p, H5SM_LIST_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ if(HDmemcmp(image, H5SM_LIST_MAGIC, (size_t)H5_SIZEOF_MAGIC))
HGOTO_ERROR(H5E_SOHM, H5E_CANTLOAD, NULL, "bad SOHM list signature")
- p += H5_SIZEOF_MAGIC;
+ image += H5_SIZEOF_MAGIC;
/* Read messages into the list array */
ctx.sizeof_addr = H5F_SIZEOF_ADDR(udata->f);
for(u = 0; u < udata->header->num_messages; u++) {
- if(H5SM_message_decode(p, &(list->messages[u]), &ctx) < 0)
+ if(H5SM_message_decode(image, &(list->messages[u]), &ctx) < 0)
HGOTO_ERROR(H5E_SOHM, H5E_CANTLOAD, NULL, "can't decode shared message")
- p += H5SM_SOHM_ENTRY_SIZE(udata->f);
+
+ image += H5SM_SOHM_ENTRY_SIZE(udata->f);
} /* end for */
/* Read in checksum */
- UINT32DECODE(p, stored_chksum);
+ UINT32DECODE(image, stored_chksum);
/* Sanity check */
- HDassert((size_t)(p - buf) <= udata->header->list_size);
+ HDassert((size_t)(image - (const uint8_t *)_image) <= udata->header->list_size);
/* Compute checksum on entire header */
- computed_chksum = H5_checksum_metadata(buf, ((size_t)(p - buf) - H5SM_SIZEOF_CHECKSUM), 0);
+ computed_chksum = H5_checksum_metadata(_image, ((size_t)(image - (const uint8_t *)_image) - H5SM_SIZEOF_CHECKSUM), 0);
/* Verify checksum */
if(stored_chksum != computed_chksum)
@@ -544,9 +567,6 @@ H5SM_list_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
ret_value = list;
done:
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- HDONE_ERROR(H5E_SOHM, H5E_CLOSEERROR, NULL, "can't close wrapped buffer")
if(!ret_value && list) {
if(list->messages)
list->messages = H5FL_ARR_FREE(H5SM_sohm_t, list->messages);
@@ -554,205 +574,158 @@ done:
} /* end if */
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5SM_list_load() */
+} /* end H5SM__cache_list_deserialize() */
/*-------------------------------------------------------------------------
- * Function: H5SM_list_flush
+ * Function: H5SM__cache_list_image_len
*
- * Purpose: Flush this list index.
+ * Purpose: Get the size of the shared message list on disk.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: James Laird
- * November 6, 2006
+ * Programmer: John Mainzer
+ * 7/28/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5SM_list_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5SM_list_t *list)
+H5SM__cache_list_image_len(const void *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- H5WB_t *wb = NULL; /* Wrapped buffer for list index data */
- uint8_t lst_buf[H5SM_LST_BUF_SIZE]; /* Buffer for list index */
- herr_t ret_value = SUCCEED; /* Return value */
+ const H5SM_list_t *list = (const H5SM_list_t *)_thing; /* Shared message list to query */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC_NOERR
- /* check arguments */
- HDassert(f);
- HDassert(H5F_addr_defined(addr));
+ /* Check arguments */
HDassert(list);
+ HDassert(list->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(list->cache_info.type == H5AC_SOHM_LIST);
HDassert(list->header);
+ HDassert(image_len);
- if(list->cache_info.is_dirty) {
- H5SM_bt2_ctx_t ctx; /* Message encoding context */
- uint8_t *buf; /* Temporary buffer */
- uint8_t *p; /* Pointer into raw data buffer */
- uint32_t computed_chksum; /* Computed metadata checksum value */
- size_t mesgs_written; /* Number of messages written to list */
- size_t u; /* Local index variable */
-
- /* Wrap the local buffer for serialized list index info */
- if(NULL == (wb = H5WB_wrap(lst_buf, sizeof(lst_buf))))
- HGOTO_ERROR(H5E_SOHM, H5E_CANTINIT, FAIL, "can't wrap buffer")
-
- /* Get a pointer to a buffer that's large enough for serialized list index */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, list->header->list_size)))
- HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, FAIL, "can't get actual buffer")
-
- /* Get temporary pointer to buffer for serialized list index */
- p = buf;
-
- /* Encode magic number */
- HDmemcpy(p, H5SM_LIST_MAGIC, (size_t)H5_SIZEOF_MAGIC);
- p += H5_SIZEOF_MAGIC;
-
- /* Write messages from the messages array to disk */
- mesgs_written = 0;
- ctx.sizeof_addr = H5F_SIZEOF_ADDR(f);
- for(u = 0; u < list->header->list_max && mesgs_written < list->header->num_messages; u++) {
- if(list->messages[u].location != H5SM_NO_LOC) {
- if(H5SM_message_encode(p, &(list->messages[u]), &ctx) < 0)
- HGOTO_ERROR(H5E_SOHM, H5E_CANTFLUSH, FAIL, "unable to write shared message to disk")
-
- p += H5SM_SOHM_ENTRY_SIZE(f);
- ++mesgs_written;
- } /* end if */
- } /* end for */
- HDassert(mesgs_written == list->header->num_messages);
-
- /* Compute checksum on buffer */
- computed_chksum = H5_checksum_metadata(buf, (size_t)(p - buf), 0);
- UINT32ENCODE(p, computed_chksum);
-#ifdef H5_CLEAR_MEMORY
-HDmemset(p, 0, (list->header->list_size - (size_t)(p - buf)));
-#endif /* H5_CLEAR_MEMORY */
+ *image_len = list->header->list_size;
- /* Write the list to disk */
- HDassert((size_t)(p - buf) <= list->header->list_size);
- if(H5F_block_write(f, H5FD_MEM_SOHM_INDEX, addr, list->header->list_size, dxpl_id, buf) < 0)
- HGOTO_ERROR(H5E_SOHM, H5E_CANTFLUSH, FAIL, "unable to save sohm table to disk")
-
- list->cache_info.is_dirty = FALSE;
- } /* end if */
-
- if(destroy)
- if(H5SM_list_dest(f, list) < 0)
- HGOTO_ERROR(H5E_SOHM, H5E_CANTFREE, FAIL, "unable to destroy list")
-
-done:
- /* Release resources */
- if(wb && H5WB_unwrap(wb) < 0)
- HDONE_ERROR(H5E_SOHM, H5E_CLOSEERROR, FAIL, "can't close wrapped buffer")
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5SM__cache_list_image_len() */
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5SM_list_flush() */
+/**************************************/
+/* no H5SM_cache_list_pre_serialize() */
+/**************************************/
/*-------------------------------------------------------------------------
- * Function: H5SM_list_dest
+ * Function: H5SM__cache_list_serialize
*
- * Purpose: Frees all memory used by the list.
+ * Purpose: Serialize the contents of the supplied shared message list, and
+ * load this data into the supplied buffer.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Success: SUCCEED
+ * Failure: FAIL
*
- * Programmer: James Laird
- * November 6, 2006
+ * Programmer: John Mainzer
+ * 7/28/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5SM_list_dest(H5F_t *f, H5SM_list_t* list)
+H5SM__cache_list_serialize(const H5F_t *f, void *_image, size_t len,
+ void *_thing)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ H5SM_list_t *list = (H5SM_list_t *)_thing; /* Instance being serialized */
+ H5SM_bt2_ctx_t ctx; /* Message encoding context */
+ uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+ size_t mesgs_serialized; /* Number of messages serialized */
+ size_t u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
- /* Sanity check */
+ /* Check arguments */
+ HDassert(f);
+ HDassert(image);
HDassert(list);
+ HDassert(list->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(list->cache_info.type == H5AC_SOHM_LIST);
HDassert(list->header);
- HDassert(list->messages);
+ HDassert(list->header->list_size == len);
+
+ /* Encode magic number */
+ HDmemcpy(image, H5SM_LIST_MAGIC, (size_t)H5_SIZEOF_MAGIC);
+ image += H5_SIZEOF_MAGIC;
+
+ /* serialize messages from the messages array */
+ mesgs_serialized = 0;
+ ctx.sizeof_addr = H5F_SIZEOF_ADDR(f);
+ for(u = 0; ((u < list->header->list_max) && (mesgs_serialized < list->header->num_messages)); u++) {
+ if(list->messages[u].location != H5SM_NO_LOC) {
+ if(H5SM_message_encode(image, &(list->messages[u]), &ctx) < 0)
+ HGOTO_ERROR(H5E_SOHM, H5E_CANTFLUSH, FAIL, "unable to serialize shared message")
+
+ image += H5SM_SOHM_ENTRY_SIZE(f);
+ ++mesgs_serialized;
+ } /* end if */
+ } /* end for */
- /* If we're going to free the space on disk, the address must be valid */
- HDassert(!list->cache_info.free_file_space_on_destroy || H5F_addr_defined(list->cache_info.addr));
+ HDassert(mesgs_serialized == list->header->num_messages);
- /* Check for freeing file space for shared message index list */
- if(list->cache_info.free_file_space_on_destroy) {
- /* Release the space on disk */
- /* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_SOHM_INDEX, H5AC_dxpl_id, list->cache_info.addr, (hsize_t)list->header->list_size) < 0)
- HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, FAIL, "unable to free shared message list")
- } /* end if */
+ /* Compute checksum on buffer */
+ computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (uint8_t *)_image), 0);
+ UINT32ENCODE(image, computed_chksum);
- /* Destroy Shared Object Header Message list */
- if(H5SM_list_free(list) < 0)
- HGOTO_ERROR(H5E_SOHM, H5E_CANTRELEASE, FAIL, "unable to free shared message list")
+#ifdef H5_CLEAR_MEMORY
+ HDmemset(image, 0, (list->header->list_size - (size_t)(image - (uint8_t *)_image)));
+#endif /* H5_CLEAR_MEMORY */
+
+ /* sanity check */
+ HDassert((size_t)(image - (uint8_t *)_image) <= list->header->list_size);
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5SM_list_dest() */
+} /* end H5SM__cache_list_serialize() */
+
+/****************************************/
+/* no H5SM_cache_list_notify() function */
+/****************************************/
/*-------------------------------------------------------------------------
- * Function: H5SM_list_clear
+ * Function: H5SM__cache_list_free_icr
*
- * Purpose: Marks a list as not dirty.
+ * Purpose: Free all memory used by the list.
*
- * Return: Non-negative on success/Negative on failure
+ * Note: The metadata cache sets the object's cache_info.magic to
+ * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr
+ * callback (checked in assert).
*
- * Programmer: James Laird
- * November 6, 2006
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 7/28/14
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5SM_list_clear(H5F_t *f, H5SM_list_t *list, hbool_t destroy)
+H5SM__cache_list_free_icr(void *_thing)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ H5SM_list_t *list = (H5SM_list_t *)_thing; /* Shared message list to release */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Check arguments */
HDassert(list);
+ HDassert(list->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC);
+ HDassert(list->cache_info.type == H5AC_SOHM_LIST);
- /* Reset the dirty flag. */
- list->cache_info.is_dirty = FALSE;
-
- if(destroy)
- if(H5SM_list_dest(f, list) < 0)
- HGOTO_ERROR(H5E_SOHM, H5E_CANTFREE, FAIL, "unable to destroy SOHM list")
+ /* Destroy Shared Object Header Message list */
+ if(H5SM_list_free(list) < 0)
+ HGOTO_ERROR(H5E_SOHM, H5E_CANTRELEASE, FAIL, "unable to free shared message list")
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end of H5SM_list_clear() */
-
-
-/*-------------------------------------------------------------------------
- * Function: H5SM_list_size
- *
- * Purpose: Gets the size of a list on disk.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: James Laird
- * November 6, 2006
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5SM_list_size(const H5F_t H5_ATTR_UNUSED *f, const H5SM_list_t *list, size_t *size_ptr)
-{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- /* check arguments */
- HDassert(f);
- HDassert(list);
- HDassert(list->header);
- HDassert(size_ptr);
-
- /* Set size value */
- *size_ptr = list->header->list_size;
-
- FUNC_LEAVE_NOAPI(SUCCEED)
-} /* end H5SM_list_size() */
+} /* end H5SM__cache_list_free_icr() */
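Taken together, the rewritten H5SMcache.c callbacks are driven by the metadata cache in a fixed order: get_load_size sizes the on-disk image, deserialize builds the in-core entry from it, image_len and serialize regenerate the image when a dirty entry is flushed or evicted, and free_icr releases the in-core copy. A standalone toy of that driver order, assuming none of the HDF5 types and omitting error handling:

#include <stdlib.h>
#include <string.h>

/* Toy callback table in the spirit of the new H5AC_class_t slots */
typedef struct toy_class {
    size_t (*get_load_size)(const void *udata);
    void  *(*deserialize)(const void *image, size_t len, void *udata);
    size_t (*image_len)(const void *thing);
    void   (*serialize)(void *image, size_t len, void *thing);
    void   (*free_icr)(void *thing);
} toy_class_t;

/* Load path: size the image, "read" it, hand it to the client */
static void *toy_protect(const toy_class_t *cls, const void *disk, void *udata)
{
    size_t len = cls->get_load_size(udata);
    void *image = malloc(len);
    void *thing;

    memcpy(image, disk, len);              /* stands in for H5F_block_read */
    thing = cls->deserialize(image, len, udata);
    free(image);
    return thing;
}

/* Evict path: size, serialize, "write", then drop the in-core copy */
static void toy_evict_dirty(const toy_class_t *cls, void *disk, void *thing)
{
    size_t len = cls->image_len(thing);
    void *image = malloc(len);

    cls->serialize(image, len, thing);
    memcpy(disk, image, len);              /* stands in for H5F_block_write */
    free(image);
    cls->free_icr(thing);
}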
diff --git a/src/H5SMtest.c b/src/H5SMtest.c
index 12ed766..c4e02bc 100644
--- a/src/H5SMtest.c
+++ b/src/H5SMtest.c
@@ -98,7 +98,7 @@ H5SM_get_mesg_count_test(H5F_t *f, hid_t dxpl_id, unsigned type_id,
cache_udata.f = f;
/* Look up the master SOHM table */
- if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC_READ)))
+ if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
/* Find the correct index for this message type */