author     Dana Robinson <derobins@hdfgroup.org>    2016-06-29 15:13:20 (GMT)
committer  Dana Robinson <derobins@hdfgroup.org>    2016-06-29 15:13:20 (GMT)
commit     c15d93826b1fee98dfebc9903808a3b8a29e9512 (patch)
tree       3321abfc300b207e1a2d9db0356a995fd2822950 /src
parent     3d13b7c6eeedda3037d2db2ea5bd30a36ebde5c0 (diff)
parent     83724bd7873e3e199a94ba9c3526732d8117e996 (diff)
[svn-r30118] Sync with trunk.
Diffstat (limited to 'src')
-rw-r--r--  src/H5AC.c              118
-rw-r--r--  src/H5ACprivate.h        64
-rw-r--r--  src/H5B2int.c             8
-rw-r--r--  src/H5C.c              1851
-rw-r--r--  src/H5Cpkg.h             24
-rw-r--r--  src/H5Cprivate.h         55
-rw-r--r--  src/H5Cquery.c            4
-rw-r--r--  src/H5EA.c              189
-rw-r--r--  src/H5EAcache.c          42
-rw-r--r--  src/H5EAdblkpage.c       16
-rw-r--r--  src/H5EAdblock.c         42
-rw-r--r--  src/H5EApkg.h             3
-rw-r--r--  src/H5EAsblock.c         30
-rw-r--r--  src/H5FD.c                6
-rw-r--r--  src/H5FDcore.c            4
-rw-r--r--  src/H5FDfamily.c          8
-rw-r--r--  src/H5FDmulti.c           4
-rw-r--r--  src/H5FDprivate.h         2
-rw-r--r--  src/H5FDpublic.h          4
-rw-r--r--  src/H5FDstdio.c           4
-rw-r--r--  src/H5HFcache.c         292
-rw-r--r--  src/H5MM.c                8
-rw-r--r--  src/H5PL.c                2
-rw-r--r--  src/H5T.c                 2
-rw-r--r--  src/H5Znbit.c           836
-rw-r--r--  src/H5Zscaleoffset.c    211
-rw-r--r--  src/H5Ztrans.c          141
-rw-r--r--  src/H5detect.c            2
-rw-r--r--  src/H5private.h          33
-rw-r--r--  src/H5public.h           19
30 files changed, 1665 insertions, 2359 deletions
diff --git a/src/H5AC.c b/src/H5AC.c
index 1604d35..3021a6d 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -183,13 +183,13 @@ done:
herr_t
H5AC__init_package(void)
{
+#ifdef H5_DEBUG_BUILD
H5P_genplist_t *xfer_plist; /* Dataset transfer property list object */
+ H5FD_dxpl_type_t dxpl_type; /* Property indicating the type of the internal dxpl */
+#endif /* H5_DEBUG_BUILD */
#ifdef H5_HAVE_PARALLEL
H5P_coll_md_read_flag_t coll_meta_read;
#endif /* H5_HAVE_PARALLEL */
-#ifdef H5_DEBUG_BUILD
- H5FD_dxpl_type_t dxpl_type; /* Property indicating the type of the internal dxpl */
-#endif /* H5_DEBUG_BUILD */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -201,9 +201,8 @@ H5AC__init_package(void)
const char *s; /* String for environment variables */
s = HDgetenv("H5_COLL_API_SANITY_CHECK");
- if(s && HDisdigit(*s)) {
+ if(s && HDisdigit(*s))
H5_coll_api_sanity_check_g = (hbool_t)HDstrtol(s, NULL, 0);
- }
}
#endif /* H5_HAVE_PARALLEL */
@@ -278,7 +277,9 @@ H5AC__init_package(void)
H5AC_rawdata_dxpl_id = H5P_DATASET_XFER_DEFAULT;
#endif /* defined(H5_HAVE_PARALLEL) || defined(H5_DEBUG_BUILD) */
+#if defined(H5_DEBUG_BUILD) | defined(H5_HAVE_PARALLEL)
done:
+#endif /* defined(H5_DEBUG_BUILD) | defined(H5_HAVE_PARALLEL) */
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5AC__init_package() */
@@ -2077,113 +2078,6 @@ done:
/*************************************************************************/
-/*************************** Debugging Functions: ************************/
-/*************************************************************************/
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5AC_get_entry_ptr_from_addr()
- *
- * Purpose: Debugging function that attempts to look up an entry in the
- * cache by its file address, and if found, returns a pointer
- * to the entry in *entry_ptr_ptr. If the entry is not in the
- * cache, *entry_ptr_ptr is set to NULL.
- *
- * WARNING: This call should be used only in debugging
- * routines, and it should be avoided when
- * possible.
- *
- * Further, if we ever multi-thread the cache,
- * this routine will have to be either discarded
- * or heavily re-worked.
- *
- * Finally, keep in mind that the entry whose
- * pointer is obtained in this fashion may not
- * be in a stable state.
- *
- * Note that this function is only defined if NDEBUG
- * is not defined.
- *
- * As heavy use of this function is almost certainly a
- * bad idea, the metadata cache tracks the number of
- * successful calls to this function, and (if
- * H5C_DO_SANITY_CHECKS is defined) displays any
- * non-zero count on cache shutdown.
- *
- * This function is just a wrapper that calls the H5C
- * version of the function.
- *
- * Return: FAIL if error is detected, SUCCEED otherwise.
- *
- * Programmer: John Mainzer, 5/30/14
- *
- *-------------------------------------------------------------------------
- */
-#ifndef NDEBUG
-herr_t
-H5AC_get_entry_ptr_from_addr(const H5F_t *f, haddr_t addr, void **entry_ptr_ptr)
-{
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- if(H5C_get_entry_ptr_from_addr(f, addr, entry_ptr_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_entry_ptr_from_addr() failed")
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_get_entry_ptr_from_addr() */
-#endif /* NDEBUG */
-
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5AC_verify_entry_type()
- *
- * Purpose: Debugging function that attempts to look up an entry in the
- * cache by its file address, and if found, test to see if its
- * type field contains the expected value.
- *
- * If the specified entry is in cache, *in_cache_ptr is set
- * to TRUE, and *type_ok_ptr is set to TRUE or FALSE
- * depending on whether the entries type field matches the
- * expected_type parameter
- *
- * If the target entry is not in cache, *in_cache_ptr is
- * set to FALSE, and *type_ok_ptr is undefined.
- *
- * Note that this function is only defined if NDEBUG
- * is not defined.
- *
- * This function is just a wrapper that calls the H5C
- * version of the function.
- *
- * Return: FAIL if error is detected, SUCCEED otherwise.
- *
- * Programmer: John Mainzer, 5/30/14
- *
- *-------------------------------------------------------------------------
- */
-#ifndef NDEBUG
-herr_t
-H5AC_verify_entry_type(const H5F_t *f, haddr_t addr, const H5AC_class_t *expected_type,
- hbool_t *in_cache_ptr, hbool_t *type_ok_ptr)
-{
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- if(H5C_verify_entry_type(f, addr, expected_type, in_cache_ptr, type_ok_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_verify_entry_type() failed")
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_verify_entry_type() */
-#endif /* NDEBUG */
-
-
-
-/*************************************************************************/
/**************************** Private Functions: *************************/
/*************************************************************************/
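A note on the conditional done: label introduced in the H5AC__init_package() hunk above: when neither H5_HAVE_PARALLEL nor H5_DEBUG_BUILD is defined, no remaining HGOTO_ERROR path can jump to the label, and most compilers then warn about an unused label. The stand-alone sketch below shows the same pattern in isolation; it is illustrative only, and do_debug_setup() is a hypothetical placeholder rather than an HDF5 routine.

/* Minimal sketch of the conditional error-label pattern (illustrative
 * only; do_debug_setup() is a hypothetical stand-in, not HDF5 code). */
typedef int herr_t;
#define SUCCEED  0
#define FAIL    (-1)

#ifdef H5_DEBUG_BUILD
static int do_debug_setup(void) { return 0; }
#endif /* H5_DEBUG_BUILD */

static herr_t
init_package_sketch(void)
{
    herr_t ret_value = SUCCEED;

#ifdef H5_DEBUG_BUILD
    if(do_debug_setup() < 0) {
        ret_value = FAIL;
        goto done;              /* the only code path that targets the label */
    } /* end if */
#endif /* H5_DEBUG_BUILD */

#ifdef H5_DEBUG_BUILD
done:                           /* emit the label only when a goto can reach it */
#endif /* H5_DEBUG_BUILD */
    return ret_value;
}

int main(void) { return init_package_sketch() == SUCCEED ? 0 : 1; }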
diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h
index 513ce61..84c9e93 100644
--- a/src/H5ACprivate.h
+++ b/src/H5ACprivate.h
@@ -53,35 +53,35 @@
/* Types of metadata objects cached */
typedef enum {
- H5AC_BT_ID = 0, /* ( 0) B-tree nodes */
- H5AC_SNODE_ID, /* ( 1) symbol table nodes */
- H5AC_LHEAP_PRFX_ID, /* ( 2) local heap prefix */
- H5AC_LHEAP_DBLK_ID, /* ( 3) local heap data block */
- H5AC_GHEAP_ID, /* ( 4) global heap */
- H5AC_OHDR_ID, /* ( 5) object header */
- H5AC_OHDR_CHK_ID, /* ( 6) object header chunk */
- H5AC_BT2_HDR_ID, /* ( 8) v2 B-tree header */
- H5AC_BT2_INT_ID, /* ( 9) v2 B-tree internal node */
- H5AC_BT2_LEAF_ID, /* (10) v2 B-tree leaf node */
- H5AC_FHEAP_HDR_ID, /* (11) fractal heap header */
- H5AC_FHEAP_DBLOCK_ID, /* (12) fractal heap direct block */
- H5AC_FHEAP_IBLOCK_ID, /* (13) fractal heap indirect block */
- H5AC_FSPACE_HDR_ID, /* (14) free space header */
- H5AC_FSPACE_SINFO_ID, /* (15) free space sections */
- H5AC_SOHM_TABLE_ID, /* (16) shared object header message master table */
- H5AC_SOHM_LIST_ID, /* (17) shared message index stored as a list */
- H5AC_EARRAY_HDR_ID, /* (18) extensible array header */
- H5AC_EARRAY_IBLOCK_ID, /* (19) extensible array index block */
- H5AC_EARRAY_SBLOCK_ID, /* (20) extensible array super block */
- H5AC_EARRAY_DBLOCK_ID, /* (21) extensible array data block */
- H5AC_EARRAY_DBLK_PAGE_ID, /* (22) extensible array data block page */
- H5AC_FARRAY_HDR_ID, /* (23) fixed array header */
- H5AC_FARRAY_DBLOCK_ID, /* (24) fixed array data block */
- H5AC_FARRAY_DBLK_PAGE_ID, /* (25) fixed array data block page */
- H5AC_SUPERBLOCK_ID, /* (26) file superblock */
- H5AC_DRVRINFO_ID, /* (27) driver info block (supplements superblock)*/
- H5AC_TEST_ID, /* (28) test entry -- not used for actual files */
- H5AC_NTYPES /* Number of types, must be last */
+ H5AC_BT_ID = 0, /* ( 0) B-tree nodes */
+ H5AC_SNODE_ID, /* ( 1) symbol table nodes */
+ H5AC_LHEAP_PRFX_ID, /* ( 2) local heap prefix */
+ H5AC_LHEAP_DBLK_ID, /* ( 3) local heap data block */
+ H5AC_GHEAP_ID, /* ( 4) global heap */
+ H5AC_OHDR_ID, /* ( 5) object header */
+ H5AC_OHDR_CHK_ID, /* ( 6) object header chunk */
+ H5AC_BT2_HDR_ID, /* ( 7) v2 B-tree header */
+ H5AC_BT2_INT_ID, /* ( 8) v2 B-tree internal node */
+ H5AC_BT2_LEAF_ID, /* ( 9) v2 B-tree leaf node */
+ H5AC_FHEAP_HDR_ID, /* (10) fractal heap header */
+ H5AC_FHEAP_DBLOCK_ID, /* (11) fractal heap direct block */
+ H5AC_FHEAP_IBLOCK_ID, /* (12) fractal heap indirect block */
+ H5AC_FSPACE_HDR_ID, /* (13) free space header */
+ H5AC_FSPACE_SINFO_ID, /* (14) free space sections */
+ H5AC_SOHM_TABLE_ID, /* (15) shared object header message master table */
+ H5AC_SOHM_LIST_ID, /* (16) shared message index stored as a list */
+ H5AC_EARRAY_HDR_ID, /* (17) extensible array header */
+ H5AC_EARRAY_IBLOCK_ID, /* (18) extensible array index block */
+ H5AC_EARRAY_SBLOCK_ID, /* (19) extensible array super block */
+ H5AC_EARRAY_DBLOCK_ID, /* (20) extensible array data block */
+ H5AC_EARRAY_DBLK_PAGE_ID, /* (21) extensible array data block page */
+ H5AC_FARRAY_HDR_ID, /* (22) fixed array header */
+ H5AC_FARRAY_DBLOCK_ID, /* (23) fixed array data block */
+ H5AC_FARRAY_DBLK_PAGE_ID, /* (24) fixed array data block page */
+ H5AC_SUPERBLOCK_ID, /* (25) file superblock */
+ H5AC_DRVRINFO_ID, /* (26) driver info block (supplements superblock) */
+ H5AC_TEST_ID, /* (27) test entry -- not used for actual files */
+ H5AC_NTYPES /* Number of types, must be last */
} H5AC_type_t;
/* H5AC_DUMP_STATS_ON_CLOSE should always be FALSE when
@@ -311,6 +311,7 @@ H5_DLLVAR hid_t H5AC_rawdata_dxpl_id;
#define H5AC__TAKE_OWNERSHIP_FLAG H5C__TAKE_OWNERSHIP_FLAG
#define H5AC__FLUSH_LAST_FLAG H5C__FLUSH_LAST_FLAG
#define H5AC__FLUSH_COLLECTIVELY_FLAG H5C__FLUSH_COLLECTIVELY_FLAG
+#define H5AC__EVICT_ALLOW_LAST_PINS_FLAG H5C__EVICT_ALLOW_LAST_PINS_FLAG
/* #defines of flags used to report entry status in the
@@ -380,11 +381,6 @@ H5_DLL herr_t H5AC_add_candidate(H5AC_t * cache_ptr, haddr_t addr);
#ifndef NDEBUG /* debugging functions */
H5_DLL herr_t H5AC_stats(const H5F_t *f);
H5_DLL herr_t H5AC_dump_cache(const H5F_t *f);
-H5_DLL herr_t H5AC_get_entry_ptr_from_addr(const H5F_t *f, haddr_t addr,
- void ** entry_ptr_ptr);
-H5_DLL herr_t H5AC_verify_entry_type(const H5F_t * f, haddr_t addr,
- const H5AC_class_t * expected_type, hbool_t * in_cache_ptr,
- hbool_t * type_ok_ptr);
#endif /* NDEBUG */ /* end debugging functions */
#endif /* !_H5ACprivate_H */
diff --git a/src/H5B2int.c b/src/H5B2int.c
index e877a17..3bd788c 100644
--- a/src/H5B2int.c
+++ b/src/H5B2int.c
@@ -3612,7 +3612,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+H5_ATTR_PURE static herr_t
H5B2__assert_leaf(const H5B2_hdr_t *hdr, const H5B2_leaf_t *leaf)
{
/* General sanity checking on node */
@@ -3635,7 +3635,7 @@ H5B2__assert_leaf(const H5B2_hdr_t *hdr, const H5B2_leaf_t *leaf)
*
*-------------------------------------------------------------------------
*/
-static herr_t
+H5_ATTR_PURE static herr_t
H5B2__assert_leaf2(const H5B2_hdr_t *hdr, const H5B2_leaf_t *leaf, const H5B2_leaf_t H5_ATTR_UNUSED *leaf2)
{
/* General sanity checking on node */
@@ -3658,7 +3658,7 @@ H5B2__assert_leaf2(const H5B2_hdr_t *hdr, const H5B2_leaf_t *leaf, const H5B2_le
*
*-------------------------------------------------------------------------
*/
-static herr_t
+H5_ATTR_PURE static herr_t
H5B2__assert_internal(hsize_t parent_all_nrec, const H5B2_hdr_t *hdr, const H5B2_internal_t *internal)
{
hsize_t tot_all_nrec; /* Total number of records at or below this node */
@@ -3699,7 +3699,7 @@ H5B2__assert_internal(hsize_t parent_all_nrec, const H5B2_hdr_t *hdr, const H5B2
*
*-------------------------------------------------------------------------
*/
-static herr_t
+H5_ATTR_PURE static herr_t
H5B2__assert_internal2(hsize_t parent_all_nrec, const H5B2_hdr_t *hdr, const H5B2_internal_t *internal, const H5B2_internal_t *internal2)
{
hsize_t tot_all_nrec; /* Total number of records at or below this node */
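The H5B2int.c hunks above only add the H5_ATTR_PURE qualifier to the static sanity-check helpers. The macro (defined in H5private.h, which this diff does not show) is a portability wrapper around a compiler hint that a function's result depends only on its arguments and on the memory it reads. A plausible definition and usage sketch, under that assumption:

/* Illustrative definition -- the real macro lives in H5private.h and may
 * differ in detail. */
#if defined(__GNUC__) || defined(__clang__)
#  define H5_ATTR_PURE __attribute__((pure))
#else
#  define H5_ATTR_PURE /* expands to nothing on other compilers */
#endif

/* With the hint, a compiler may fold repeated calls that have identical
 * arguments and no intervening writes to the inspected memory. */
H5_ATTR_PURE static int
count_nonzero(const int *vals, unsigned n)
{
    unsigned u;
    int count = 0;

    for(u = 0; u < n; u++)
        if(vals[u] != 0)
            count++;
    return count;
}

int main(void)
{
    int v[4] = {1, 0, 2, 0};

    return (count_nonzero(v, 4) == 2) ? 0 : 1;
}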
diff --git a/src/H5C.c b/src/H5C.c
index e1ac8f2..66e3e25 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -166,6 +166,10 @@ static herr_t H5C_make_space_in_cache(H5F_t * f,
size_t space_needed,
hbool_t write_permitted);
+static herr_t H5C__mark_flush_dep_dirty(H5C_cache_entry_t * entry);
+
+static herr_t H5C__mark_flush_dep_clean(H5C_cache_entry_t * entry);
+
static herr_t H5C__generate_image(const H5F_t *f, H5C_t * cache_ptr, H5C_cache_entry_t *entry_ptr,
hid_t dxpl_id, int64_t *entry_size_change_ptr);
@@ -180,6 +184,11 @@ static herr_t H5C_validate_pinned_entry_list(H5C_t * cache_ptr);
static herr_t H5C_validate_protected_entry_list(H5C_t * cache_ptr);
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+#ifndef NDEBUG
+static void H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t * entry,
+ const H5C_cache_entry_t * base_entry);
+#endif /* NDEBUG */
+
#if 0 /* debugging routines */
herr_t H5C_dump_cache(H5C_t * cache_ptr, const char * cache_name);
herr_t H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn);
@@ -206,6 +215,9 @@ hbool_t H5_PKG_INIT_VAR = FALSE;
/* Declare a free list to manage the H5C_t struct */
H5FL_DEFINE_STATIC(H5C_t);
+/* Declare a free list to manage flush dependency arrays */
+H5FL_BLK_DEFINE_STATIC(parent);
+
/* Declare extern free list to manage the H5C_collective_write_t struct */
H5FL_EXTERN(H5C_collective_write_t);
@@ -443,10 +455,6 @@ H5C_create(size_t max_cache_size,
cache_ptr->prefix[0] = '\0'; /* empty string */
-#ifndef NDEBUG
- cache_ptr->get_entry_ptr_from_addr_counter = 0;
-#endif /* NDEBUG */
-
/* Set return value */
ret_value = cache_ptr;
@@ -738,23 +746,6 @@ H5C_dest(H5F_t * f, hid_t dxpl_id)
cache_ptr->cork_list_ptr = NULL;
} /* end if */
- /* Only display count of number of calls to H5C_get_entry_ptr_from_add()
- * if NDEBUG is undefined, and H5C_DO_SANITY_CHECKS is defined. Need
- * this as the print statement will upset windows, and we frequently
- * run debug builds there.
- *
- * Note that the count is still kept whenever NDEBUG is undefined, and
- * is reasonably accessible via debugger.
- */
-#ifndef NDEBUG
-#if H5C_DO_SANITY_CHECKS
- if ( cache_ptr->get_entry_ptr_from_addr_counter > 0 )
- HDfprintf(stdout,
- "*** %ld calls to H5C_get_entry_ptr_from_add(). ***\n",
- cache_ptr->get_entry_ptr_from_addr_counter);
-#endif /* H5C_DO_SANITY_CHECKS */
-#endif /* NDEBUG */
-
#ifndef NDEBUG
cache_ptr->magic = 0;
#endif /* NDEBUG */
@@ -1243,7 +1234,6 @@ H5C_insert_entry(H5F_t * f,
size_t empty_space;
H5C_cache_entry_t *entry_ptr;
H5C_cache_entry_t *test_entry_ptr;
- unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -1366,9 +1356,10 @@ H5C_insert_entry(H5F_t * f,
/* Initialize flush dependency height fields */
entry_ptr->flush_dep_parent = NULL;
- for(u = 0; u < H5C__NUM_FLUSH_DEP_HEIGHTS; u++)
- entry_ptr->child_flush_dep_height_rc[u] = 0;
- entry_ptr->flush_dep_height = 0;
+ entry_ptr->flush_dep_nparents = 0;
+ entry_ptr->flush_dep_parent_nalloc = 0;
+ entry_ptr->flush_dep_nchildren = 0;
+ entry_ptr->flush_dep_ndirty_children = 0;
entry_ptr->ht_next = NULL;
entry_ptr->ht_prev = NULL;
@@ -1585,31 +1576,32 @@ H5C_mark_entry_dirty(void *thing)
entry_ptr->dirtied = TRUE;
} else if ( entry_ptr->is_pinned ) {
- hbool_t was_pinned_unprotected_and_clean;
+ hbool_t was_clean;
- was_pinned_unprotected_and_clean = ! ( entry_ptr->is_dirty );
+ was_clean = !entry_ptr->is_dirty;
/* mark the entry as dirty if it isn't already */
entry_ptr->is_dirty = TRUE;
entry_ptr->image_up_to_date = FALSE;
- if ( was_pinned_unprotected_and_clean ) {
-
- H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr);
- }
+ /* Propagate the dirty flag up the flush dependency chain if appropriate */
+ if(was_clean) {
+ H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr);
- if ( ! (entry_ptr->in_slist) ) {
+ if(entry_ptr->flush_dep_nparents > 0)
+ if(H5C__mark_flush_dep_dirty(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
+ } /* end if */
+ if(!entry_ptr->in_slist) {
H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
}
H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr)
- } else {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \
- "Entry is neither pinned nor protected??")
- }
+ } /* end if */
+ else
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Entry is neither pinned nor protected??")
done:
FUNC_LEAVE_NOAPI(ret_value)
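The body of H5C__mark_flush_dep_dirty() is declared earlier in this diff but not included in it. Based only on the fields visible in these hunks (flush_dep_parent[], flush_dep_nparents, flush_dep_nchildren, flush_dep_ndirty_children), the bookkeeping it must perform for a clean-to-dirty transition looks roughly like the hedged sketch below; the real routine may do more.

/* Hedged sketch only; not the actual H5C__mark_flush_dep_dirty(). */
typedef struct sketch_entry_t {
    struct sketch_entry_t **flush_dep_parent;          /* array of parent entries   */
    unsigned                flush_dep_nparents;        /* # of parents in array     */
    unsigned                flush_dep_nchildren;       /* # of flush dep children   */
    unsigned                flush_dep_ndirty_children; /* # of those that are dirty */
} sketch_entry_t;

int
sketch_mark_flush_dep_dirty(sketch_entry_t *entry)
{
    unsigned u;

    /* The entry just went from clean to dirty, so each parent gains one
     * dirty child.  The count can never exceed the number of children. */
    for(u = 0; u < entry->flush_dep_nparents; u++) {
        sketch_entry_t *parent = entry->flush_dep_parent[u];

        if(parent->flush_dep_ndirty_children >= parent->flush_dep_nchildren)
            return -1;          /* accounting error */
        parent->flush_dep_ndirty_children++;
    } /* end for */

    return 0;
}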
@@ -1638,9 +1630,6 @@ H5C_move_entry(H5C_t * cache_ptr,
{
H5C_cache_entry_t * entry_ptr = NULL;
H5C_cache_entry_t * test_entry_ptr = NULL;
-#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
- hbool_t was_dirty;
-#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
#if H5C_DO_SANITY_CHECKS
hbool_t removed_entry_from_slist = FALSE;
#endif /* H5C_DO_SANITY_CHECKS */
@@ -1735,24 +1724,29 @@ H5C_move_entry(H5C_t * cache_ptr,
entry_ptr->addr = new_addr;
- if ( ! ( entry_ptr->destroy_in_progress ) ) {
+ if(!entry_ptr->destroy_in_progress) {
+ hbool_t was_dirty;
-#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
was_dirty = entry_ptr->is_dirty;
-#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+ entry_ptr->is_dirty = TRUE;
- entry_ptr->is_dirty = TRUE;
- /* This shouldn't be needed, but it keeps the test code happy */
+ /* This shouldn't be needed, but it keeps the test code happy */
entry_ptr->image_up_to_date = FALSE;
+ /* Propagate the dirty flag up the flush dependency chain if
+ * appropriate */
+ if(!entry_ptr->flush_in_progress) {
+ if(!was_dirty && entry_ptr->flush_dep_nparents > 0)
+ if(H5C__mark_flush_dep_dirty(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
+ } /* end if */
+
H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
#if H5C_DO_SANITY_CHECKS
-
- if ( removed_entry_from_slist ) {
-
+ if(removed_entry_from_slist) {
/* we just removed the entry from the slist. Thus we
* must touch up cache_ptr->slist_len_increase and
* cache_ptr->slist_size_increase to keep from skewing
@@ -1760,21 +1754,18 @@ H5C_move_entry(H5C_t * cache_ptr,
*/
cache_ptr->slist_len_increase -= 1;
cache_ptr->slist_size_increase -= (int64_t)(entry_ptr->size);
- }
-
+ } /* end if */
#endif /* H5C_DO_SANITY_CHECKS */
- if ( ! ( entry_ptr->flush_in_progress ) ) {
-
+ if(!entry_ptr->flush_in_progress) {
/* skip the update if a flush is in progress */
H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, FAIL)
}
- }
+ } /* end if */
H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)
done:
-
#if H5C_DO_EXTREME_SANITY_CHECKS
if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
@@ -1786,7 +1777,6 @@ done:
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5C_move_entry() */
@@ -1842,16 +1832,21 @@ H5C_resize_entry(void *thing, size_t new_size)
hbool_t was_clean;
/* make note of whether the entry was clean to begin with */
- was_clean = ! ( entry_ptr->is_dirty );
+ was_clean = !entry_ptr->is_dirty;
/* mark the entry as dirty if it isn't already */
entry_ptr->is_dirty = TRUE;
entry_ptr->image_up_to_date = FALSE;
/* Release the current image */
- if( entry_ptr->image_ptr )
+ if(entry_ptr->image_ptr)
entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
+ /* Propagate the dirty flag up the flush dependency chain if appropriate */
+ if(was_clean && entry_ptr->flush_dep_nparents > 0)
+ if(H5C__mark_flush_dep_dirty(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
+
/* do a flash cache size increase if appropriate */
if ( cache_ptr->flash_size_increase_possible ) {
@@ -4119,9 +4114,25 @@ H5C_unprotect(H5F_t * f,
entry_ptr->image_up_to_date = !entry_ptr->is_dirty;
#endif /* JRM */
- /* Update index for newly dirtied entry */
- if(was_clean && entry_ptr->is_dirty)
- H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr)
+ /* Check for newly dirtied entry */
+ if(was_clean && entry_ptr->is_dirty) {
+ /* Propagate the flush dep dirty flag up the flush dependency chain
+ * if appropriate */
+ if(entry_ptr->flush_dep_nparents > 0)
+ if(H5C__mark_flush_dep_dirty(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
+
+ /* Update index for newly dirtied entry */
+ H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr)
+ } /* end if */
+ /* Check for newly clean entry */
+ else if(!was_clean && !entry_ptr->is_dirty) {
+ /* Propagate the flush dep clean flag up the flush dependency chain
+ * if appropriate */
+ if(entry_ptr->flush_dep_nparents > 0)
+ if(H5C__mark_flush_dep_clean(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep clean flag")
+ } /* end else-if */
/* Pin or unpin the entry as requested. */
if(pin_entry) {
@@ -4455,103 +4466,6 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5C_adjust_flush_dependency_rc()
- *
- * Purpose: "Atomicly" adjust flush dependency ref. counts for an entry,
- * as a result of a flush dependency child's height changing.
- *
- * Note: Entry will remain in flush dependency relationship with its
- * child entry (i.e. it's not going to get unpinned as a result
- * of this change), but change could trickle upward, if this
- * entry's height changes and it has a flush dependency parent.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * 3/05/09
- *
- *-------------------------------------------------------------------------
- */
-static void
-H5C_adjust_flush_dependency_rc(H5C_cache_entry_t * cache_entry,
- unsigned old_child_height, unsigned new_child_height)
-{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- /* Sanity checks */
- HDassert(cache_entry);
- HDassert(cache_entry->is_pinned);
- HDassert(cache_entry->flush_dep_height > 0);
- HDassert(cache_entry->flush_dep_height < H5C__NUM_FLUSH_DEP_HEIGHTS);
- HDassert(cache_entry->child_flush_dep_height_rc[old_child_height] > 0);
- HDassert(old_child_height < H5C__NUM_FLUSH_DEP_HEIGHTS);
- HDassert(old_child_height != new_child_height);
- HDassert(new_child_height < H5C__NUM_FLUSH_DEP_HEIGHTS);
-
- /* Adjust ref. counts for entry's flush dependency children heights */
- cache_entry->child_flush_dep_height_rc[new_child_height]++;
- cache_entry->child_flush_dep_height_rc[old_child_height]--;
-
- /* Check for flush dependency height of entry increasing */
- if((new_child_height + 1) > cache_entry->flush_dep_height) {
-
- /* Check if entry has _its_ own parent flush dependency entry */
- if(NULL != cache_entry->flush_dep_parent) {
- /* Adjust flush dependency ref. counts on entry's parent */
- H5C_adjust_flush_dependency_rc(cache_entry->flush_dep_parent, cache_entry->flush_dep_height, new_child_height + 1);
- } /* end if */
-
- /* Set new flush dependency height of entry */
- cache_entry->flush_dep_height = new_child_height + 1;
- } /* end if */
- else {
- /* Check for child's flush dep. height decreasing and ref. count of
- * old child height going to zero, it could mean the parent's
- * flush dependency height dropped.
- */
- if((new_child_height < old_child_height)
- && ((old_child_height + 1) == cache_entry->flush_dep_height)
- && (0 == cache_entry->child_flush_dep_height_rc[old_child_height])) {
- int i; /* Local index variable */
-
- /* Re-scan child flush dependency height ref. counts to determine
- * this entry's height.
- */
-#ifndef NDEBUG
- for(i = (H5C__NUM_FLUSH_DEP_HEIGHTS - 1); i > (int)new_child_height; i--)
- HDassert(0 == cache_entry->child_flush_dep_height_rc[i]);
-#endif /* NDEBUG */
- for(i = (int)new_child_height; i >= 0; i--)
- /* Check for child flush dependencies of this height */
- if(cache_entry->child_flush_dep_height_rc[i] > 0)
- break;
-
- /* Sanity checks */
- HDassert((unsigned)(i + 1) < cache_entry->flush_dep_height);
-
- /* Check if entry has _its_ own parent flush dependency entry */
- if(NULL != cache_entry->flush_dep_parent) {
- /* Adjust flush dependency ref. counts on entry's parent */
- H5C_adjust_flush_dependency_rc(cache_entry->flush_dep_parent, cache_entry->flush_dep_height, (unsigned)(i + 1));
- } /* end if */
-
- /* Set new flush dependency height of entry */
- cache_entry->flush_dep_height = (unsigned)(i + 1);
- } /* end if */
- } /* end else */
-
-
- /* Post-conditions, for successful operation */
- HDassert(cache_entry->is_pinned);
- HDassert(cache_entry->flush_dep_height > 0);
- HDassert(cache_entry->flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS);
- HDassert(cache_entry->child_flush_dep_height_rc[new_child_height] > 0);
-
- FUNC_LEAVE_NOAPI_VOID
-} /* H5C_adjust_flush_dependency_rc() */
-
-
-/*-------------------------------------------------------------------------
* Function: H5C_create_flush_dependency()
*
* Purpose: Initiates a parent<->child entry flush dependency. The parent
@@ -4563,11 +4477,6 @@ H5C_adjust_flush_dependency_rc(H5C_cache_entry_t * cache_entry,
* currently used to implement Single-Writer/Multiple-Reader (SWMR)
* I/O access for data structures in the file).
*
- * Each child entry can have only one parent entry, but parent
- * entries can have >1 child entries. The flush dependency
- * height of a parent entry is one greater than the max. flush
- * dependency height of its children.
- *
* Creating a flush dependency between two entries will also pin
* the parent entry.
*
@@ -4584,9 +4493,6 @@ H5C_create_flush_dependency(void * parent_thing, void * child_thing)
H5C_t * cache_ptr;
H5C_cache_entry_t * parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent thing's entry */
H5C_cache_entry_t * child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child thing's entry */
-#ifndef NDEBUG
- unsigned prev_flush_dep_height = parent_entry->flush_dep_height; /* Previous flush height for parent entry */
-#endif /* NDEBUG */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -4594,45 +4500,34 @@ H5C_create_flush_dependency(void * parent_thing, void * child_thing)
/* Sanity checks */
HDassert(parent_entry);
HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(parent_entry->flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS);
HDassert(H5F_addr_defined(parent_entry->addr));
HDassert(child_entry);
HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(H5F_addr_defined(child_entry->addr));
- HDassert(child_entry->flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS);
cache_ptr = parent_entry->cache_ptr;
- HDassert(parent_entry->ring == child_entry->ring);
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(cache_ptr == child_entry->cache_ptr);
+#ifndef NDEBUG
+ /* Make sure the parent is not already a parent */
+ {
+ unsigned u;
+
+ for(u = 0; u < child_entry->flush_dep_nparents; u++)
+ HDassert(child_entry->flush_dep_parent[u] != parent_entry);
+ } /* end block */
+#endif /* NDEBUG */
/* More sanity checks */
if(child_entry == parent_entry)
HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Child entry flush dependency parent can't be itself")
if(!(parent_entry->is_protected || parent_entry->is_pinned))
HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Parent entry isn't pinned or protected")
- if(NULL != child_entry->flush_dep_parent)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Child entry already has flush dependency parent")
- {
- H5C_cache_entry_t *tmp_entry = parent_entry; /* Temporary cache entry in flush dependency chain */
- unsigned tmp_flush_height = 0; /* Different in heights of parent entry */
-
- /* Find the top entry in the flush dependency list */
- while(NULL != tmp_entry->flush_dep_parent) {
- tmp_flush_height++;
- tmp_entry = tmp_entry->flush_dep_parent;
- } /* end while */
-
- /* Check if we will make the dependency chain too long */
- if((tmp_flush_height + child_entry->flush_dep_height + 1)
- > H5C__NUM_FLUSH_DEP_HEIGHTS)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Combined flush dependency height too large")
- }
/* Check for parent not pinned */
if(!parent_entry->is_pinned) {
/* Sanity check */
- HDassert(parent_entry->flush_dep_height == 0);
+ HDassert(parent_entry->flush_dep_nchildren == 0);
HDassert(!parent_entry->pinned_from_client);
HDassert(!parent_entry->pinned_from_cache);
@@ -4644,33 +4539,50 @@ H5C_create_flush_dependency(void * parent_thing, void * child_thing)
/* Mark the entry as pinned from the cache's action (possibly redundantly) */
parent_entry->pinned_from_cache = TRUE;
- /* Increment ref. count for parent's flush dependency children heights */
- parent_entry->child_flush_dep_height_rc[child_entry->flush_dep_height]++;
+ /* Check if we need to resize the child's parent array */
+ if(child_entry->flush_dep_nparents >= child_entry->flush_dep_parent_nalloc) {
+ if(child_entry->flush_dep_parent_nalloc == 0) {
+ /* Array does not exist yet, allocate it */
+ HDassert(!child_entry->flush_dep_parent);
- /* Check for increasing parent flush dependency height */
- if((child_entry->flush_dep_height + 1) > parent_entry->flush_dep_height) {
-
- /* Check if parent entry has _its_ own parent flush dependency entry */
- if(NULL != parent_entry->flush_dep_parent) {
- /* Adjust flush dependency ref. counts on parent entry's parent */
- H5C_adjust_flush_dependency_rc(parent_entry->flush_dep_parent, parent_entry->flush_dep_height, (child_entry->flush_dep_height + 1));
+ if(NULL == (child_entry->flush_dep_parent = (H5C_cache_entry_t **)H5FL_BLK_MALLOC(parent, H5C_FLUSH_DEP_PARENT_INIT * sizeof(H5C_cache_entry_t *))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for flush dependency parent list")
+ child_entry->flush_dep_parent_nalloc = H5C_FLUSH_DEP_PARENT_INIT;
} /* end if */
+ else {
+ /* Resize existing array */
+ HDassert(child_entry->flush_dep_parent);
- /* Increase flush dependency height of parent entry */
- parent_entry->flush_dep_height = child_entry->flush_dep_height + 1;
+ if(NULL == (child_entry->flush_dep_parent = (H5C_cache_entry_t **)H5FL_BLK_REALLOC(parent, child_entry->flush_dep_parent, 2 * child_entry->flush_dep_parent_nalloc * sizeof(H5C_cache_entry_t *))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for flush dependency parent list")
+ child_entry->flush_dep_parent_nalloc *= 2;
+ } /* end else */
} /* end if */
- /* Set parent for child entry */
- child_entry->flush_dep_parent = parent_entry;
+ /* Add the dependency to the child's parent array */
+ child_entry->flush_dep_parent[child_entry->flush_dep_nparents] = parent_entry;
+ child_entry->flush_dep_nparents++;
+
+ /* Increment parent's number of children */
+ parent_entry->flush_dep_nchildren++;
+
+ /* Adjust the number of dirty children */
+ if(child_entry->is_dirty) {
+ /* Sanity check */
+ HDassert(parent_entry->flush_dep_ndirty_children < parent_entry->flush_dep_nchildren);
+ parent_entry->flush_dep_ndirty_children++;
+ } /* end if */
/* Post-conditions, for successful operation */
HDassert(parent_entry->is_pinned);
- HDassert(parent_entry->flush_dep_height > 0);
- HDassert(parent_entry->flush_dep_height < H5C__NUM_FLUSH_DEP_HEIGHTS);
- HDassert(prev_flush_dep_height <= parent_entry->flush_dep_height);
- HDassert(parent_entry->child_flush_dep_height_rc[child_entry->flush_dep_height] > 0);
- HDassert(NULL != child_entry->flush_dep_parent);
+ HDassert(parent_entry->flush_dep_nchildren > 0);
+ HDassert(child_entry->flush_dep_parent);
+ HDassert(child_entry->flush_dep_nparents > 0);
+ HDassert(child_entry->flush_dep_parent_nalloc > 0);
+#ifndef NDEBUG
+ H5C__assert_flush_dep_nocycle(parent_entry, child_entry);
+#endif /* NDEBUG */
done:
FUNC_LEAVE_NOAPI(ret_value)
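The allocation logic above grows each child's flush_dep_parent array geometrically: it starts at H5C_FLUSH_DEP_PARENT_INIT entries and doubles whenever it fills, which keeps the amortized cost of adding a parent constant. The sketch below shows the same append-with-doubling idea using plain realloc() in place of HDF5's H5FL_BLK_MALLOC/H5FL_BLK_REALLOC free-list allocator; the initial size of 1 is an arbitrary stand-in for H5C_FLUSH_DEP_PARENT_INIT, whose value is not shown in this diff.

/* Grow-by-doubling append, sketched with the C library allocator. */
#include <stdlib.h>

#define PARENT_INIT 1           /* stand-in for H5C_FLUSH_DEP_PARENT_INIT */

typedef struct parent_array_t {
    void   **items;             /* allocated array of parent pointers */
    unsigned nused;             /* # of slots in use                  */
    unsigned nalloc;            /* # of slots allocated               */
} parent_array_t;

int
parent_array_append(parent_array_t *arr, void *item)
{
    /* Resize only when the array is full. */
    if(arr->nused >= arr->nalloc) {
        unsigned new_nalloc = arr->nalloc ? 2 * arr->nalloc : PARENT_INIT;
        void   **tmp = (void **)realloc(arr->items, new_nalloc * sizeof(void *));

        if(NULL == tmp)
            return -1;          /* allocation failed; original array intact */
        arr->items  = tmp;
        arr->nalloc = new_nalloc;
    } /* end if */

    arr->items[arr->nused++] = item;
    return 0;
}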
@@ -4681,9 +4593,7 @@ done:
* Function: H5C_destroy_flush_dependency()
*
* Purpose: Terminates a parent<-> child entry flush dependency. The
- * parent entry must be pinned and have a positive flush
- * dependency height (which could go to zero as a result of
- * this operation).
+ * parent entry must be pinned.
*
* Return: Non-negative on success/Negative on failure
*
@@ -4698,9 +4608,7 @@ H5C_destroy_flush_dependency(void *parent_thing, void * child_thing)
H5C_t * cache_ptr;
H5C_cache_entry_t * parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent entry */
H5C_cache_entry_t * child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child entry */
-#ifndef NDEBUG
- unsigned prev_flush_dep_height = parent_entry->flush_dep_height; /* Previous flush height for parent entry */
-#endif /* NDEBUG */
+ unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -4709,9 +4617,7 @@ H5C_destroy_flush_dependency(void *parent_thing, void * child_thing)
HDassert(parent_entry);
HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(H5F_addr_defined(parent_entry->addr));
- HDassert(parent_entry->flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS);
HDassert(child_entry);
- HDassert(child_entry->flush_dep_parent != child_entry);
HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(H5F_addr_defined(child_entry->addr));
cache_ptr = parent_entry->cache_ptr;
@@ -4722,78 +4628,70 @@ H5C_destroy_flush_dependency(void *parent_thing, void * child_thing)
/* Usage checks */
if(!parent_entry->is_pinned)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't pinned")
- if(0 == parent_entry->flush_dep_height)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't a flush dependency parent")
if(NULL == child_entry->flush_dep_parent)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Child entry doesn't have a flush dependency parent")
- if(0 == parent_entry->child_flush_dep_height_rc[child_entry->flush_dep_height])
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry flush dependency ref. count has no child entries of this height")
- if(child_entry->flush_dep_parent != parent_entry)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't flush dependency parent for child entry")
-
- /* Decrement the ref. count for flush dependency height of children for parent entry */
- parent_entry->child_flush_dep_height_rc[child_entry->flush_dep_height]--;
-
- /* Check for flush dependency ref. count at this height going to zero and
- * parent entry flush dependency height dropping
- */
- if(((child_entry->flush_dep_height + 1) == parent_entry->flush_dep_height) &&
- 0 == parent_entry->child_flush_dep_height_rc[child_entry->flush_dep_height]) {
- int i; /* Local index variable */
-
- /* Reverse scan for new flush dependency height of parent */
-#ifndef NDEBUG
- for(i = (H5C__NUM_FLUSH_DEP_HEIGHTS - 1); i > (int)child_entry->flush_dep_height; i--)
- HDassert(0 == parent_entry->child_flush_dep_height_rc[i]);
-#endif /* NDEBUG */
- for(i = (int)child_entry->flush_dep_height; i >= 0; i--)
- /* Check for child flush dependencies of this height */
- if(parent_entry->child_flush_dep_height_rc[i] > 0)
- break;
-
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Child entry doesn't have a flush dependency parent array")
+ if(0 == parent_entry->flush_dep_nchildren)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry flush dependency ref. count has no child dependencies")
+
+ /* Search for parent in child's parent array. This is a linear search
+ * because we do not expect large numbers of parents. If this changes, we
+ * may wish to change the parent array to a skip list */
+ for(u = 0; u < child_entry->flush_dep_nparents; u++)
+ if(child_entry->flush_dep_parent[u] == parent_entry)
+ break;
+ if(u == child_entry->flush_dep_nparents)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't a flush dependency parent for child entry")
+
+ /* Remove parent entry from child's parent array */
+ if(u < (child_entry->flush_dep_nparents - 1))
+ HDmemmove(&child_entry->flush_dep_parent[u],
+ &child_entry->flush_dep_parent[u + 1],
+ (child_entry->flush_dep_nparents - u - 1) * sizeof(child_entry->flush_dep_parent[0]));
+ child_entry->flush_dep_nparents--;
+
+ /* Adjust parent entry's nchildren and unpin parent if it goes to zero */
+ parent_entry->flush_dep_nchildren--;
+ if(0 == parent_entry->flush_dep_nchildren) {
/* Sanity check */
- HDassert((unsigned)(i + 1) < parent_entry->flush_dep_height);
+ HDassert(parent_entry->pinned_from_cache);
- /* Check if parent entry is a child in another flush dependency relationship */
- if(NULL != parent_entry->flush_dep_parent) {
- /* Change flush dependency ref. counts of parent's parent */
- H5C_adjust_flush_dependency_rc(parent_entry->flush_dep_parent, parent_entry->flush_dep_height, (unsigned)(i + 1));
- } /* end if */
+ /* Check if we should unpin parent entry now */
+ if(!parent_entry->pinned_from_client) {
+ /* Update the replacement policy if the entry is not protected */
+ if(!parent_entry->is_protected)
+ H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, parent_entry, FAIL)
- /* Increase flush dependency height of parent entry */
- parent_entry->flush_dep_height = (unsigned)(i + 1);
+ /* Unpin the entry now */
+ parent_entry->is_pinned = FALSE;
- /* Check for height of parent dropping to zero (i.e. no longer a
- * parent of _any_ child flush dependencies).
- */
- if(0 == parent_entry->flush_dep_height) {
- /* Sanity check */
- HDassert(parent_entry->pinned_from_cache);
-
- /* Check if we should unpin parent entry now */
- if(!parent_entry->pinned_from_client) {
- /* Update the replacement policy if the entry is not protected */
- if(!parent_entry->is_protected)
- H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, parent_entry, FAIL)
+ /* Update the stats for an unpin operation */
+ H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, parent_entry)
+ } /* end if */
- /* Unpin the entry now */
- parent_entry->is_pinned = FALSE;
+ /* Mark the entry as unpinned from the cache's action */
+ parent_entry->pinned_from_cache = FALSE;
+ } /* end if */
- /* Update the stats for an unpin operation */
- H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, parent_entry)
- } /* end if */
+ /* Adjust parent entry's ndirty_children */
+ if(child_entry->is_dirty) {
+ /* Sanity check */
+ HDassert(parent_entry->flush_dep_ndirty_children > 0);
- /* Mark the entry as unpinned from the cache's action */
- parent_entry->pinned_from_cache = FALSE;
- } /* end if */
+ parent_entry->flush_dep_ndirty_children--;
} /* end if */
- /* Reset parent of child entry */
- child_entry->flush_dep_parent = NULL;
-
- /* Post-conditions, for successful operation */
- HDassert(prev_flush_dep_height >= parent_entry->flush_dep_height);
- HDassert(NULL == child_entry->flush_dep_parent);
+ /* Shrink or free the parent array if appropriate */
+ if(child_entry->flush_dep_nparents == 0) {
+ child_entry->flush_dep_parent = (H5C_cache_entry_t **)H5FL_BLK_FREE(parent, child_entry->flush_dep_parent);
+ child_entry->flush_dep_parent_nalloc = 0;
+ } /* end if */
+ else if(child_entry->flush_dep_parent_nalloc > H5C_FLUSH_DEP_PARENT_INIT
+ && child_entry->flush_dep_nparents
+ <= (child_entry->flush_dep_parent_nalloc / 4)) {
+ if(NULL == (child_entry->flush_dep_parent = (H5C_cache_entry_t **)H5FL_BLK_REALLOC(parent, child_entry->flush_dep_parent, (child_entry->flush_dep_parent_nalloc / 4) * sizeof(H5C_cache_entry_t *))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for flush dependency parent list")
+ child_entry->flush_dep_parent_nalloc /= 4;
+ } /* end if */
done:
FUNC_LEAVE_NOAPI(ret_value)
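The removal path above is the mirror image: the parent is deleted from the child's array with HDmemmove(), the parent is unpinned once its last child dependency is gone, and the array is freed when empty or shrunk once it is no more than a quarter full. A companion sketch of that delete-and-shrink step, again with realloc()/free() standing in for the H5FL_BLK_* calls:

/* Delete one slot from a dense pointer array, then shrink when sparse. */
#include <stdlib.h>
#include <string.h>

typedef struct parent_array_t {
    void   **items;
    unsigned nused;
    unsigned nalloc;
} parent_array_t;

int
parent_array_remove(parent_array_t *arr, unsigned idx)
{
    if(idx >= arr->nused)
        return -1;

    /* Close the gap; the order of the remaining parents is preserved. */
    if(idx < arr->nused - 1)
        memmove(&arr->items[idx], &arr->items[idx + 1],
                (arr->nused - idx - 1) * sizeof(arr->items[0]));
    arr->nused--;

    if(0 == arr->nused) {
        /* Last parent gone -- release the array entirely. */
        free(arr->items);
        arr->items  = NULL;
        arr->nalloc = 0;
    } /* end if */
    else if(arr->nused <= arr->nalloc / 4) {
        /* Mostly empty -- shrink to a quarter of the old capacity. */
        void **tmp = (void **)realloc(arr->items, (arr->nalloc / 4) * sizeof(void *));

        if(tmp != NULL) {       /* on failure just keep the larger block */
            arr->items  = tmp;
            arr->nalloc /= 4;
        } /* end if */
    } /* end else-if */

    return 0;
}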
@@ -6208,8 +6106,8 @@ H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
int32_t i;
int32_t cur_ring_pel_len;
int32_t old_ring_pel_len;
- int32_t passes = 0;
unsigned cooked_flags;
+ unsigned evict_flags;
H5SL_node_t * node_ptr = NULL;
H5C_cache_entry_t * entry_ptr = NULL;
H5C_cache_entry_t * next_entry_ptr = NULL;
@@ -6239,10 +6137,9 @@ H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
HDassert(cache_ptr->epoch_markers_active == 0);
/* Filter out the flags that are not relevant to the flush/invalidate.
- * At present, only the H5C__FLUSH_CLEAR_ONLY_FLAG is kept.
*/
cooked_flags = flags & H5C__FLUSH_CLEAR_ONLY_FLAG;
-
+ evict_flags = flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG;
/* The flush proceedure here is a bit strange.
*
@@ -6253,10 +6150,8 @@ H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
*
* Since the fractal heap can dirty, resize, and/or move entries
* in is flush callback, it is possible that the cache will still
- * contain dirty entries at this point. If so, we must make up to
- * H5C__MAX_PASSES_ON_FLUSH more passes through the skip list
- * to allow it to empty. If is is not empty at this point, we again
- * scream and die.
+ * contain dirty entries at this point. If so, we must make more
+ * passes through the skip list to allow it to empty.
*
* Further, since clean entries can be dirtied, resized, and/or moved
* as the result of a flush call back (either the entries own, or that
@@ -6289,402 +6184,353 @@ H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
old_ring_pel_len = cur_ring_pel_len;
while(cache_ptr->index_ring_len[ring] > 0) {
- unsigned curr_flush_dep_height = 0;
- unsigned flush_dep_passes = 0;
-
- /* Loop over all flush dependency heights of entries */
- while((curr_flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS) &&
- (cache_ptr->index_ring_len[ring] > 0) &&
- (flush_dep_passes < H5C__MAX_PASSES_ON_FLUSH)) {
- hbool_t flushed_during_dep_loop = FALSE;
-
- /* first, try to flush-destroy any dirty entries. Do this by
- * making a scan through the slist. Note that new dirty entries
- * may be created by the flush call backs. Thus it is possible
- * that the slist will not be empty after we finish the scan.
- */
+ /* first, try to flush-destroy any dirty entries. Do this by
+ * making a scan through the slist. Note that new dirty entries
+ * may be created by the flush call backs. Thus it is possible
+ * that the slist will not be empty after we finish the scan.
+ */
#if H5C_DO_SANITY_CHECKS
- /* Depending on circumstances, H5C__flush_single_entry() will
- * remove dirty entries from the slist as it flushes them.
- * Thus for sanity checks we must make note of the initial
- * slist length and size before we do any flushes.
- */
- initial_slist_len = cache_ptr->slist_len;
- initial_slist_size = cache_ptr->slist_size;
-
- /* There is also the possibility that entries will be
- * dirtied, resized, moved, and/or removed from the cache
- * as the result of calls to the flush callbacks. We use
- * the slist_len_increase and slist_size_increase increase
- * fields in struct H5C_t to track these changes for purpose
- * of sanity checking.
- *
- * To this end, we must zero these fields before we start
- * the pass through the slist.
- */
- cache_ptr->slist_len_increase = 0;
- cache_ptr->slist_size_increase = 0;
+ /* Depending on circumstances, H5C__flush_single_entry() will
+ * remove dirty entries from the slist as it flushes them.
+ * Thus for sanity checks we must make note of the initial
+ * slist length and size before we do any flushes.
+ */
+ initial_slist_len = cache_ptr->slist_len;
+ initial_slist_size = cache_ptr->slist_size;
+
+ /* There is also the possibility that entries will be
+ * dirtied, resized, moved, and/or removed from the cache
+ * as the result of calls to the flush callbacks. We use
+ * the slist_len_increase and slist_size_increase increase
+ * fields in struct H5C_t to track these changes for purpose
+ * of sanity checking.
+ *
+ * To this end, we must zero these fields before we start
+ * the pass through the slist.
+ */
+ cache_ptr->slist_len_increase = 0;
+ cache_ptr->slist_size_increase = 0;
- /* Finally, reset the flushed_slist_len and flushed_slist_size
- * fields to zero, as these fields are used to accumulate
- * the slist lenght and size that we see as we scan through
- * the slist.
- */
- flushed_slist_len = 0;
- flushed_slist_size = 0;
+ /* Finally, reset the flushed_slist_len and flushed_slist_size
+ * fields to zero, as these fields are used to accumulate
+ * the slist length and size that we see as we scan through
+ * the slist.
+ */
+ flushed_slist_len = 0;
+ flushed_slist_size = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- /* set the cache_ptr->slist_change_in_pre_serialize and
- * cache_ptr->slist_change_in_serialize to false.
- *
- * These flags are set to TRUE by H5C__flush_single_entry if the
- * slist is modified by a pre_serialize or serialize call
- * respectively.
- *
- * H5C_flush_invalidate_cache() uses these flags to detect any
- * modifications to the slist that might corrupt the scan of
- * the slist -- and restart the scan in this event.
- */
- cache_ptr->slist_change_in_pre_serialize = FALSE;
- cache_ptr->slist_change_in_serialize = FALSE;
-
- /* this done, start the scan of the slist */
- restart_slist_scan = TRUE;
- while(restart_slist_scan || (node_ptr != NULL)) {
- if(restart_slist_scan) {
- restart_slist_scan = FALSE;
-
- /* Start at beginning of skip list */
- node_ptr = H5SL_first(cache_ptr->slist_ptr);
- if(node_ptr == NULL)
- /* the slist is empty -- break out of inner loop */
- break;
-
- /* Get cache entry for this node */
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if(NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
-
- HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(next_entry_ptr->is_dirty);
- HDassert(next_entry_ptr->in_slist);
- HDassert(next_entry_ptr->ring >= ring);
- } /* end if */
+ /* set the cache_ptr->slist_change_in_pre_serialize and
+ * cache_ptr->slist_change_in_serialize to false.
+ *
+ * These flags are set to TRUE by H5C__flush_single_entry if the
+ * slist is modified by a pre_serialize or serialize call
+ * respectively.
+ *
+ * H5C_flush_invalidate_cache() uses these flags to detect any
+ * modifications to the slist that might corrupt the scan of
+ * the slist -- and restart the scan in this event.
+ */
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
+
+ /* this done, start the scan of the slist */
+ restart_slist_scan = TRUE;
+ while(restart_slist_scan || (node_ptr != NULL)) {
+ if(restart_slist_scan) {
+ restart_slist_scan = FALSE;
+
+ /* Start at beginning of skip list */
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
+ if(node_ptr == NULL)
+ /* the slist is empty -- break out of inner loop */
+ break;
- entry_ptr = next_entry_ptr;
+ /* Get cache entry for this node */
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ if(NULL == next_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
- /* It is possible that entries will be dirtied, resized,
- * flushed, or removed from the cache via the take ownership
- * flag as the result of pre_serialize or serialized callbacks.
- *
- * This in turn can corrupt the scan through the slist.
- *
- * We test for slist modifications in the pre_serialize
- * and serialize callbacks, and restart the scan of the
- * slist if we find them. However, best we do some extra
- * sanity checking just in case.
- */
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->in_slist);
- HDassert(entry_ptr->is_dirty);
- HDassert(entry_ptr->ring >= ring);
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+ HDassert(next_entry_ptr->ring >= ring);
+ } /* end if */
- /* increment node pointer now, before we delete its target
- * from the slist.
- */
- node_ptr = H5SL_next(node_ptr);
- if(node_ptr != NULL) {
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if(NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
- HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(next_entry_ptr->is_dirty);
- HDassert(next_entry_ptr->in_slist);
- HDassert(next_entry_ptr->ring >= ring);
- HDassert(entry_ptr != next_entry_ptr);
- } /* end if */
- else
- next_entry_ptr = NULL;
+ entry_ptr = next_entry_ptr;
- /* Note that we now remove nodes from the slist as we flush
- * the associated entries, instead of leaving them there
- * until we are done, and then destroying all nodes in
- * the slist.
- *
- * While this optimization used to be easy, with the possibility
- * of new entries being added to the slist in the midst of the
- * flush, we must keep the slist in cannonical form at all
- * times.
- */
- HDassert(entry_ptr != NULL);
- HDassert(entry_ptr->in_slist);
+ /* It is possible that entries will be dirtied, resized,
+ * flushed, or removed from the cache via the take ownership
+ * flag as the result of pre_serialize or serialized callbacks.
+ *
+ * This in turn can corrupt the scan through the slist.
+ *
+ * We test for slist modifications in the pre_serialize
+ * and serialize callbacks, and restart the scan of the
+ * slist if we find them. However, best we do some extra
+ * sanity checking just in case.
+ */
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->in_slist);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(entry_ptr->ring >= ring);
- if(((!entry_ptr->flush_me_last) ||
- ((entry_ptr->flush_me_last) &&
- (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
- (entry_ptr->ring == ring)) {
- if(entry_ptr->is_protected) {
- /* we have major problems -- but lets flush
- * everything we can before we flag an error.
- */
- protected_entries++;
- } else if(entry_ptr->is_pinned) {
+ /* increment node pointer now, before we delete its target
+ * from the slist.
+ */
+ node_ptr = H5SL_next(node_ptr);
+ if(node_ptr != NULL) {
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ if(NULL == next_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+ HDassert(next_entry_ptr->ring >= ring);
+ HDassert(entry_ptr != next_entry_ptr);
+ } /* end if */
+ else
+ next_entry_ptr = NULL;
+
+ /* Note that we now remove nodes from the slist as we flush
+ * the associated entries, instead of leaving them there
+ * until we are done, and then destroying all nodes in
+ * the slist.
+ *
+ * While this optimization used to be easy, with the possibility
+ * of new entries being added to the slist in the midst of the
+ * flush, we must keep the slist in canonical form at all
+ * times.
+ */
+ HDassert(entry_ptr != NULL);
+ HDassert(entry_ptr->in_slist);
+
+ if(((!entry_ptr->flush_me_last) ||
+ ((entry_ptr->flush_me_last) &&
+ (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
+ (entry_ptr->flush_dep_nchildren == 0) &&
+ (entry_ptr->ring == ring)) {
+ if(entry_ptr->is_protected) {
+ /* we have major problems -- but lets flush
+ * everything we can before we flag an error.
+ */
+ protected_entries++;
+ } else if(entry_ptr->is_pinned) {
- /* Test to see if we are can flush the entry now.
- * If we can, go ahead and flush, but don't tell
- * H5C__flush_single_entry() to destroy the entry
- * as pinned entries can't be evicted.
- */
- if(entry_ptr->flush_dep_height == curr_flush_dep_height) {
#if H5C_DO_SANITY_CHECKS
- /* update flushed_slist_len & flushed_slist_size
- * before the flush. Note that the entry will
- * be removed from the slist after the flush,
- * and thus may be resized by the flush callback.
- * This is OK, as we will catch the size delta in
- * cache_ptr->slist_size_increase.
- *
- */
- flushed_slist_len++;
- flushed_slist_size += (int64_t)entry_ptr->size;
- entry_size_change = 0;
+ /* update flushed_slist_len & flushed_slist_size
+ * before the flush. Note that the entry will
+ * be removed from the slist after the flush,
+ * and thus may be resized by the flush callback.
+ * This is OK, as we will catch the size delta in
+ * cache_ptr->slist_size_increase.
+ *
+ */
+ flushed_slist_len++;
+ flushed_slist_size += (int64_t)entry_ptr->size;
+ entry_size_change = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__NO_FLAGS_SET, entry_size_change_ptr, NULL) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed.")
+ if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__NO_FLAGS_SET, entry_size_change_ptr, NULL) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed.")
#if H5C_DO_SANITY_CHECKS
- /* entry size may have changed during the flush.
- * Update flushed_slist_size to account for this.
- */
- flushed_slist_size += entry_size_change;
+ /* entry size may have changed during the flush.
+ * Update flushed_slist_size to account for this.
+ */
+ flushed_slist_size += entry_size_change;
#endif /* H5C_DO_SANITY_CHECKS */
- flushed_during_dep_loop = TRUE;
- if((cache_ptr->slist_change_in_serialize) ||
- (cache_ptr->slist_change_in_pre_serialize)) {
- /* The slist has been modified by something
- * other than the simple removal of the
- * of the flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_change_in_pre_serialize = FALSE;
- cache_ptr->slist_change_in_serialize = FALSE;
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr);
- } /* end if */
- } /* end if */
- else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
- /* This shouldn't happen -- if it does, just scream and die. */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
+ if((cache_ptr->slist_change_in_serialize) ||
+ (cache_ptr->slist_change_in_pre_serialize)) {
+ /* The slist has been modified by something
+ * other than the simple removal of the
+ * of the flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr);
} /* end if */
- else {
- if(entry_ptr->flush_dep_height == curr_flush_dep_height) {
+ } /* end if */
+ else {
#if H5C_DO_SANITY_CHECKS
- /* update flushed_slist_len & flushed_slist_size
- * before the flush. Note that the entry will
- * be removed from the slist after the flush,
- * and thus may be resized by the flush callback.
- * This is OK, as we will catch the size delta in
- * cache_ptr->slist_size_increase.
- *
- */
- flushed_slist_len++;
- flushed_slist_size += (int64_t)entry_ptr->size;
- entry_size_change = 0;
+ /* update flushed_slist_len & flushed_slist_size
+ * before the flush. Note that the entry will
+ * be removed from the slist after the flush,
+ * and thus may be resized by the flush callback.
+ * This is OK, as we will catch the size delta in
+ * cache_ptr->slist_size_increase.
+ *
+ */
+ flushed_slist_len++;
+ flushed_slist_size += (int64_t)entry_ptr->size;
+ entry_size_change = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr,
- (cooked_flags | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG),
- entry_size_change_ptr, NULL) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed.")
+ if(H5C__flush_single_entry(f, dxpl_id, entry_ptr,
+ (cooked_flags | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG),
+ entry_size_change_ptr, NULL) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed.")
#if H5C_DO_SANITY_CHECKS
- /* entry size may have changed during the flush.
- * Update flushed_slist_size to account for this.
- */
- flushed_slist_size += entry_size_change;
+ /* entry size may have changed during the flush.
+ * Update flushed_slist_size to account for this.
+ */
+ flushed_slist_size += entry_size_change;
#endif /* H5C_DO_SANITY_CHECKS */
- flushed_during_dep_loop = TRUE;
-
- if((cache_ptr->slist_change_in_serialize) ||
- (cache_ptr->slist_change_in_pre_serialize)) {
- /* The slist has been modified by something
- * other than the simple removal of the
- * of the flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_change_in_pre_serialize = FALSE;
- cache_ptr->slist_change_in_serialize = FALSE;
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
- } /* end if */
- } /* end if */
- else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
- /* This shouldn't happen -- if it does, just scream and die. */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
- } /* end else */
- } /* end if */
- } /* end while loop scanning skip list */
+ if((cache_ptr->slist_change_in_serialize) ||
+ (cache_ptr->slist_change_in_pre_serialize)) {
+ /* The slist has been modified by something
+ * other than the simple removal of the
+                     * the flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
+ } /* end if */
+ } /* end else */
+ } /* end if */
+ } /* end while loop scanning skip list */
#if H5C_DO_SANITY_CHECKS
- /* It is possible that entries were added to the slist during
- * the scan, either before or after scan pointer. The following
- * asserts take this into account.
- *
- * Don't bother with the sanity checks if node_ptr != NULL, as
- * in this case we broke out of the loop because it got changed
- * out from under us.
- */
+ /* It is possible that entries were added to the slist during
+      * the scan, either before or after the scan pointer.  The following
+ * asserts take this into account.
+ *
+ * Don't bother with the sanity checks if node_ptr != NULL, as
+ * in this case we broke out of the loop because it got changed
+ * out from under us.
+ */
- if(node_ptr == NULL) {
- HDassert((flushed_slist_len + cache_ptr->slist_len) ==
- (initial_slist_len + cache_ptr->slist_len_increase));
- HDassert((flushed_slist_size + (int64_t)cache_ptr->slist_size) ==
- ((int64_t)initial_slist_size + cache_ptr->slist_size_increase));
- } /* end if */
+ if(node_ptr == NULL) {
+ HDassert((flushed_slist_len + cache_ptr->slist_len) ==
+ (initial_slist_len + cache_ptr->slist_len_increase));
+ HDassert((flushed_slist_size + (int64_t)cache_ptr->slist_size) ==
+ ((int64_t)initial_slist_size + cache_ptr->slist_size_increase));
+ } /* end if */
#endif /* H5C_DO_SANITY_CHECKS */
- /* Since we are doing a destroy, we must make a pass through
- * the hash table and try to flush - destroy all entries that
- * remain.
- *
- * It used to be that all entries remaining in the cache at
- * this point had to be clean, but with the fractal heap mods
- * this may not be the case. If so, we will flush entries out
- * of increasing address order.
- *
- * Writes to disk are possible here.
- */
- for(i = 0; i < H5C__HASH_TABLE_LEN; i++) {
- next_entry_ptr = cache_ptr->index[i];
-
- while(next_entry_ptr != NULL) {
- entry_ptr = next_entry_ptr;
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->ring >= ring);
-
- next_entry_ptr = entry_ptr->ht_next;
- HDassert((next_entry_ptr == NULL) ||
- (next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC));
-
- if(((!entry_ptr->flush_me_last) ||
- ((entry_ptr->flush_me_last) &&
- (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
- (entry_ptr->ring == ring)) {
-
- if(entry_ptr->is_protected) {
- /* we have major problems -- but lets flush and
- * destroy everything we can before we flag an
- * error.
- */
- protected_entries++;
- if(!entry_ptr->in_slist)
- HDassert(!(entry_ptr->is_dirty));
- } else if(!(entry_ptr->is_pinned)) {
+ /* Since we are doing a destroy, we must make a pass through
+ * the hash table and try to flush - destroy all entries that
+ * remain.
+ *
+ * It used to be that all entries remaining in the cache at
+ * this point had to be clean, but with the fractal heap mods
+ * this may not be the case. If so, we will flush entries out
+ * of increasing address order.
+ *
+ * Writes to disk are possible here.
+ */
+ for(i = 0; i < H5C__HASH_TABLE_LEN; i++) {
+ next_entry_ptr = cache_ptr->index[i];
- /* Test to see if we are can flush the entry now.
- * If we can, go ahead and flush.
+ while(next_entry_ptr != NULL) {
+ entry_ptr = next_entry_ptr;
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring >= ring);
+
+ next_entry_ptr = entry_ptr->ht_next;
+ HDassert((next_entry_ptr == NULL) ||
+ (next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC));
+
+ if(((!entry_ptr->flush_me_last) ||
+ ((entry_ptr->flush_me_last) &&
+ (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
+ (entry_ptr->flush_dep_nchildren == 0) &&
+ (entry_ptr->ring == ring)) {
+
+ if(entry_ptr->is_protected) {
+                    /* we have major problems -- but let's flush and
+ * destroy everything we can before we flag an
+ * error.
+ */
+ protected_entries++;
+ if(!entry_ptr->in_slist)
+ HDassert(!(entry_ptr->is_dirty));
+ } else if(!(entry_ptr->is_pinned)) {
+
+ /* if *entry_ptr is dirty, it is possible
+ * that one or more other entries may be
+ * either removed from the cache, loaded
+ * into the cache, or moved to a new location
+ * in the file as a side effect of the flush.
+ *
+ * If this happens, and one of the target
+ * entries happens to be the next entry in
+ * the hash bucket, we could find ourselves
+                     * either scanning a non-existent entry,
+                     * scanning through a
+ * different bucket, or skipping an entry.
+ *
+                     * None of these is good, so restart the
+                     * scan at the head of the hash bucket
+ * after the flush if *entry_ptr was dirty,
+ * on the off chance that the next entry was
+ * a target.
+ *
+                     * This is not as inefficient as it might seem,
+ * as hash buckets typically have at most two
+ * or three entries.
+ */
+ hbool_t entry_was_dirty;
+
+ entry_was_dirty = entry_ptr->is_dirty;
+
+ if(H5C__flush_single_entry(f, dxpl_id, entry_ptr,
+ (cooked_flags | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG),
+ NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed.")
+
+ if(entry_was_dirty) {
+ /* update stats for hash bucket scan
+ * restart here.
+ * -- JRM
*/
- if(entry_ptr->flush_dep_height == curr_flush_dep_height) {
- /* if *entry_ptr is dirty, it is possible
- * that one or more other entries may be
- * either removed from the cache, loaded
- * into the cache, or moved to a new location
- * in the file as a side effect of the flush.
- *
- * If this happens, and one of the target
- * entries happens to be the next entry in
- * the hash bucket, we could find ourselves
- * either find ourselves either scanning a
- * non-existant entry, scanning through a
- * different bucket, or skipping an entry.
- *
- * Neither of these are good, so restart the
- * the scan at the head of the hash bucket
- * after the flush if *entry_ptr was dirty,
- * on the off chance that the next entry was
- * a target.
- *
- * This is not as inefficient at it might seem,
- * as hash buckets typically have at most two
- * or three entries.
- */
- hbool_t entry_was_dirty;
-
- entry_was_dirty = entry_ptr->is_dirty;
-
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr,
- (cooked_flags | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG),
- NULL, NULL) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed.")
-
- if(entry_was_dirty) {
- /* update stats for hash bucket scan
- * restart here.
- * -- JRM
- */
- next_entry_ptr = cache_ptr->index[i];
- H5C__UPDATE_STATS_FOR_HASH_BUCKET_SCAN_RESTART(cache_ptr)
- } /* end if */
-
- flushed_during_dep_loop = TRUE;
- } /* end if */
- else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
- /* This shouldn't happen -- if it does, just scream and die. */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
+ next_entry_ptr = cache_ptr->index[i];
+ H5C__UPDATE_STATS_FOR_HASH_BUCKET_SCAN_RESTART(cache_ptr)
} /* end if */
} /* end if */
- /* We can't do anything if the entry is pinned. The
- * hope is that the entry will be unpinned as the
- * result of destroys of entries that reference it.
- *
- * We detect this by noting the change in the number
- * of pinned entries from pass to pass. If it stops
- * shrinking before it hits zero, we scream and die.
- */
- /* if the serialize function on the entry we last evicted
- * loaded an entry into cache (as Quincey has promised me
- * it never will), and if the cache was full, it is
- * possible that *next_entry_ptr was flushed or evicted.
- *
- * Test to see if this happened here. Note that if this
- * test is triggred, we are accessing a deallocated piece
- * of dynamically allocated memory, so we just scream and
- * die.
- *
- * Update: The code to restart the scan after flushes
- * of dirty entries should make it impossible
- * to satisfy the following test. Leave it in
- * in case I am wrong.
- * -- JRM
- */
- if((next_entry_ptr != NULL) && (next_entry_ptr->magic != H5C__H5C_CACHE_ENTRY_T_MAGIC))
- /* Something horrible has happened to
- * *next_entry_ptr -- scream and die.
- */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr->magic is invalid?!?!?.")
- } /* end while loop scanning hash table bin */
- } /* end for loop scanning hash table */
-
- /* Check for incrementing flush dependency height */
- if(flushed_during_dep_loop) {
- /* If we flushed an entry at this flush dependency height
- * start over at the bottom level of the flush dependencies
- */
- curr_flush_dep_height = 0;
-
- /* Make certain we don't get stuck in an infinite loop */
- flush_dep_passes++;
- } /* end if */
- else
- curr_flush_dep_height++;
+ } /* end if */
- } /* end while loop over flush dependency heights */
+ /* We can't do anything if the entry is pinned. The
+ * hope is that the entry will be unpinned as the
+ * result of destroys of entries that reference it.
+ *
+ * We detect this by noting the change in the number
+ * of pinned entries from pass to pass. If it stops
+ * shrinking before it hits zero, we scream and die.
+ */
+ /* if the serialize function on the entry we last evicted
+ * loaded an entry into cache (as Quincey has promised me
+ * it never will), and if the cache was full, it is
+ * possible that *next_entry_ptr was flushed or evicted.
+ *
+ * Test to see if this happened here. Note that if this
+             * test is triggered, we are accessing a deallocated piece
+ * of dynamically allocated memory, so we just scream and
+ * die.
+ *
+ * Update: The code to restart the scan after flushes
+ * of dirty entries should make it impossible
+ * to satisfy the following test. Leave it in
+ * in case I am wrong.
+ * -- JRM
+ */
+ if((next_entry_ptr != NULL) && (next_entry_ptr->magic != H5C__H5C_CACHE_ENTRY_T_MAGIC))
+ /* Something horrible has happened to
+ * *next_entry_ptr -- scream and die.
+ */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr->magic is invalid?!?!?.")
+ } /* end while loop scanning hash table bin */
+ } /* end for loop scanning hash table */
old_ring_pel_len = cur_ring_pel_len;
entry_ptr = cache_ptr->pel_head_ptr;
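
The hash-bucket comment in the hunk above reduces to a small scan-with-restart pattern: save the next pointer before flushing, and if the entry just flushed was dirty, discard the saved pointer and restart from the bucket head, since the flush callbacks may have reshuffled the bucket. A minimal sketch of that pattern, using simplified stand-in types and hypothetical helpers (flush_and_destroy(), bucket_head()) rather than the real H5C_cache_entry_t / H5C__flush_single_entry() machinery:

    #include <stddef.h>

    struct entry {
        struct entry *ht_next;      /* next entry in the same hash bucket        */
        int           is_dirty;     /* nonzero if the entry still needs a write  */
    };

    /* Hypothetical helpers; the real code uses H5C__flush_single_entry()
     * and re-reads cache_ptr->index[i] instead.
     */
    extern int flush_and_destroy(struct entry *e);
    extern struct entry *bucket_head(unsigned bucket);

    static int
    scan_bucket(unsigned bucket)
    {
        struct entry *next = bucket_head(bucket);

        while(next != NULL) {
            struct entry *cur = next;
            int was_dirty = cur->is_dirty;

            next = cur->ht_next;            /* save before cur is destroyed  */

            if(flush_and_destroy(cur) < 0)
                return -1;

            if(was_dirty)                   /* the flush may have reshuffled */
                next = bucket_head(bucket); /* the bucket -- restart at head */
        } /* end while */

        return 0;
    } /* end scan_bucket() */

As the original comment notes, hash buckets typically hold only two or three entries, so the extra restarts are cheap.
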
@@ -6700,20 +6546,19 @@ H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
} /* end while */
if((cur_ring_pel_len > 0) && (cur_ring_pel_len >= old_ring_pel_len)) {
+ /* Don't error if allowed to have pinned entries remaining */
+ if(evict_flags)
+ HGOTO_DONE(TRUE)
+
/* The number of pinned entries in the ring is positive, and
* it is not declining. Scream and die.
*/
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Pinned entry count not decreasing, cur_ring_pel_len = %d, old_ring_pel_len = %d, ring = %d", (int)cur_ring_pel_len, (int)old_ring_pel_len, (int)ring)
- } else if((cur_ring_pel_len == 0) && (old_ring_pel_len == 0)) {
- /* increment the pass count */
- passes++;
- }
+ } /* end if */
- if(passes >= H5C__MAX_PASSES_ON_FLUSH)
- /* we have exceeded the maximum number of passes through the
- * cache to flush and destroy all entries. Scream and die.
- */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Maximum passes on flush exceeded.")
+ HDassert(protected_entries == cache_ptr->pl_len);
+ if((protected_entries > 0) && (protected_entries == cache_ptr->index_len))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Only protected entries left in cache, protected_entries = %d", (int)protected_entries)
} /* main while loop */
/* Invariants, after destroying all entries in the ring */
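
With the fixed H5C__MAX_PASSES_ON_FLUSH limit gone, the loop above terminates on observable progress instead: it keeps making passes over the ring while the pinned-entry count keeps shrinking, and gives up once that count stalls above zero (returning early rather than erroring when the evict flags allow pinned entries to remain). A condensed sketch of that control flow, with hypothetical helper names standing in for the real ring bookkeeping:

    #include <stddef.h>

    /* Hypothetical helpers; the real code walks cache_ptr->pel_head_ptr to
     * count pinned entries in the ring and flushes via the slist/hash scans.
     */
    extern size_t pinned_entries_in_ring(void *cache, int ring);
    extern size_t entries_left_in_ring(void *cache, int ring);
    extern int    flush_destroy_ring_pass(void *cache, int ring);

    static int
    destroy_ring(void *cache, int ring, int allow_pinned_leftovers)
    {
        size_t cur_pel_len = pinned_entries_in_ring(cache, ring);

        while(entries_left_in_ring(cache, ring) > 0) {
            size_t old_pel_len;

            if(flush_destroy_ring_pass(cache, ring) < 0)
                return -1;                  /* flush/destroy pass failed */

            old_pel_len = cur_pel_len;
            cur_pel_len = pinned_entries_in_ring(cache, ring);

            /* Pinned count stopped shrinking: acceptable only if the caller
             * asked for it (H5C__EVICT_ALLOW_LAST_PINS_FLAG in the real code).
             */
            if(cur_pel_len > 0 && cur_pel_len >= old_pel_len)
                return allow_pinned_leftovers ? 1 : -1;
        } /* end while */

        return 0;
    } /* end destroy_ring() */
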
@@ -6767,12 +6612,12 @@ herr_t
H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
{
H5C_t * cache_ptr = f->shared->cache;
+ hbool_t destroy;
hbool_t flushed_entries_last_pass;
hbool_t flush_marked_entries;
hbool_t ignore_protected;
hbool_t tried_to_flush_protected_entry = FALSE;
hbool_t restart_slist_scan;
- int32_t passes = 0;
int32_t protected_entries = 0;
H5SL_node_t * node_ptr = NULL;
H5C_cache_entry_t * entry_ptr = NULL;
@@ -6807,6 +6652,7 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
ignore_protected = ( (flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0 );
+ destroy = ( (flags & H5C__FLUSH_INVALIDATE_FLAG) != 0 );
flush_marked_entries = ( (flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0 );
if(!flush_marked_entries)
@@ -6834,264 +6680,182 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
cache_ptr->slist_change_in_pre_serialize = FALSE;
cache_ptr->slist_change_in_serialize = FALSE;
- while((passes < H5C__MAX_PASSES_ON_FLUSH) &&
- (cache_ptr->slist_ring_len[ring] > 0) &&
+ while((cache_ptr->slist_ring_len[ring] > 0) &&
(protected_entries == 0) &&
(flushed_entries_last_pass)) {
- unsigned curr_flush_dep_height = 0;
- unsigned flush_dep_passes = 0;
-
flushed_entries_last_pass = FALSE;
- /* Loop over all flush dependency heights of entries */
- while((curr_flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS) &&
- (cache_ptr->slist_ring_len[ring] > 0) &&
- (flush_dep_passes < H5C__MAX_PASSES_ON_FLUSH)) {
- hbool_t flushed_during_dep_loop = FALSE;
-
#if H5C_DO_SANITY_CHECKS
- /* For sanity checking, try to verify that the skip list has
- * the expected size and number of entries at the end of each
- * internal while loop (see below).
- *
- * Doing this get a bit tricky, as depending on flags, we may
- * or may not flush all the entries in the slist.
- *
- * To make things more entertaining, with the advent of the
- * fractal heap, the entry serialize callback can cause entries
- * to be dirtied, resized, and/or moved. Also, the
- * pre_serialize callback can result in an entry being
- * removed from the cache via the take ownership flag.
- *
- * To deal with this, we first make note of the initial
- * skip list length and size:
- */
- initial_slist_len = cache_ptr->slist_len;
- initial_slist_size = cache_ptr->slist_size;
+ /* For sanity checking, try to verify that the skip list has
+ * the expected size and number of entries at the end of each
+ * internal while loop (see below).
+ *
+         * Doing this gets a bit tricky, as depending on flags, we may
+ * or may not flush all the entries in the slist.
+ *
+ * To make things more entertaining, with the advent of the
+ * fractal heap, the entry serialize callback can cause entries
+ * to be dirtied, resized, and/or moved. Also, the
+ * pre_serialize callback can result in an entry being
+ * removed from the cache via the take ownership flag.
+ *
+ * To deal with this, we first make note of the initial
+ * skip list length and size:
+ */
+ initial_slist_len = cache_ptr->slist_len;
+ initial_slist_size = cache_ptr->slist_size;
- /* We then zero counters that we use to track the number
- * and total size of entries flushed:
- */
- flushed_entries_count = 0;
- flushed_entries_size = 0;
-
- /* As mentioned above, there is the possibility that
- * entries will be dirtied, resized, flushed, or removed
- * from the cache via the take ownership flag during
- * our pass through the skip list. To capture the number
- * of entries added, and the skip list size delta,
- * zero the slist_len_increase and slist_size_increase of
- * the cache's instance of H5C_t. These fields will be
- * updated elsewhere to account for slist insertions and/or
- * dirty entry size changes.
- */
- cache_ptr->slist_len_increase = 0;
- cache_ptr->slist_size_increase = 0;
+ /* We then zero counters that we use to track the number
+ * and total size of entries flushed:
+ */
+ flushed_entries_count = 0;
+ flushed_entries_size = 0;
+
+ /* As mentioned above, there is the possibility that
+ * entries will be dirtied, resized, flushed, or removed
+ * from the cache via the take ownership flag during
+ * our pass through the skip list. To capture the number
+ * of entries added, and the skip list size delta,
+ * zero the slist_len_increase and slist_size_increase of
+ * the cache's instance of H5C_t. These fields will be
+ * updated elsewhere to account for slist insertions and/or
+ * dirty entry size changes.
+ */
+ cache_ptr->slist_len_increase = 0;
+ cache_ptr->slist_size_increase = 0;
- /* at the end of the loop, use these values to compute the
- * expected slist length and size and compare this with the
- * value recorded in the cache's instance of H5C_t.
- */
+ /* at the end of the loop, use these values to compute the
+ * expected slist length and size and compare this with the
+ * value recorded in the cache's instance of H5C_t.
+ */
#endif /* H5C_DO_SANITY_CHECKS */
- restart_slist_scan = TRUE;
-
- while((restart_slist_scan ) || (node_ptr != NULL)) {
- if(restart_slist_scan) {
- restart_slist_scan = FALSE;
+ restart_slist_scan = TRUE;
- /* Start at beginning of skip list */
- node_ptr = H5SL_first(cache_ptr->slist_ptr);
+ while((restart_slist_scan ) || (node_ptr != NULL)) {
+ if(restart_slist_scan) {
+ restart_slist_scan = FALSE;
- if(node_ptr == NULL)
- /* the slist is empty -- break out of inner loop */
- break;
+ /* Start at beginning of skip list */
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
- /* Get cache entry for this node */
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ if(node_ptr == NULL)
+ /* the slist is empty -- break out of inner loop */
+ break;
- if(NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+ /* Get cache entry for this node */
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(next_entry_ptr->is_dirty);
- HDassert(next_entry_ptr->in_slist);
- } /* end if */
-
- entry_ptr = next_entry_ptr;
+ if(NULL == next_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
- /* With the advent of the fractal heap, the free space
- * manager, and the version 3 cache, it is possible
- * that the pre-serialize or serialize callback will
- * dirty, resize, or take ownership of other entries
- * in the cache.
- *
- * To deal with this, I have inserted code to detect any
- * change in the skip list not directly under the control
- * of this function. If such modifications are detected,
- * we must re-start the scan of the skip list to avoid
- * the possibility that the target of the next_entry_ptr
- * may have been flushed or deleted from the cache.
- *
- * To verify that all such possibilities have been dealt
- * with, we do a bit of extra sanity checking on
- * entry_ptr.
- */
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->in_slist);
- HDassert(entry_ptr->is_dirty);
- if(!flush_marked_entries || entry_ptr->flush_marker)
- HDassert(entry_ptr->ring >= ring);
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+ } /* end if */
+
+ entry_ptr = next_entry_ptr;
+
+ /* With the advent of the fractal heap, the free space
+ * manager, and the version 3 cache, it is possible
+ * that the pre-serialize or serialize callback will
+ * dirty, resize, or take ownership of other entries
+ * in the cache.
+ *
+ * To deal with this, I have inserted code to detect any
+ * change in the skip list not directly under the control
+ * of this function. If such modifications are detected,
+ * we must re-start the scan of the skip list to avoid
+ * the possibility that the target of the next_entry_ptr
+ * may have been flushed or deleted from the cache.
+ *
+ * To verify that all such possibilities have been dealt
+ * with, we do a bit of extra sanity checking on
+ * entry_ptr.
+ */
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->in_slist);
+ HDassert(entry_ptr->is_dirty);
+ if(!flush_marked_entries || entry_ptr->flush_marker)
+ HDassert(entry_ptr->ring >= ring);
- /* increment node pointer now, before we delete its target
- * from the slist.
- */
- node_ptr = H5SL_next(node_ptr);
- if(node_ptr != NULL) {
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if(NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
-
- HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(next_entry_ptr->is_dirty);
- HDassert(next_entry_ptr->in_slist);
- if(!flush_marked_entries || next_entry_ptr->flush_marker)
- HDassert(next_entry_ptr->ring >= ring);
- HDassert(entry_ptr != next_entry_ptr);
- } /* end if */
- else
- next_entry_ptr = NULL;
+ /* increment node pointer now, before we delete its target
+ * from the slist.
+ */
+ node_ptr = H5SL_next(node_ptr);
+ if(node_ptr != NULL) {
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ if(NULL == next_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+ if(!flush_marked_entries || next_entry_ptr->flush_marker)
+ HDassert(next_entry_ptr->ring >= ring);
- HDassert(entry_ptr != NULL);
- HDassert(entry_ptr->in_slist);
+ HDassert(entry_ptr != next_entry_ptr);
+ } /* end if */
+ else
+ next_entry_ptr = NULL;
- if(((!flush_marked_entries) || (entry_ptr->flush_marker)) &&
- ((!entry_ptr->flush_me_last) ||
- ((entry_ptr->flush_me_last) &&
- (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
- (entry_ptr->ring == ring)) {
- if(entry_ptr->is_protected) {
- /* we probably have major problems -- but lets
- * flush everything we can before we decide
- * whether to flag an error.
- */
- tried_to_flush_protected_entry = TRUE;
- protected_entries++;
- } /* end if */
- else if(entry_ptr->is_pinned) {
+ HDassert(entry_ptr != NULL);
+ HDassert(entry_ptr->in_slist);
- /* Test to see if we are can flush the entry now.
- * If we can, go ahead and flush. Note that we
- * aren't trying to do a destroy here, so that
- * is not an issue.
- */
- if(entry_ptr->flush_dep_height == curr_flush_dep_height) {
+ if(((!flush_marked_entries) || (entry_ptr->flush_marker)) &&
+ ((!entry_ptr->flush_me_last) ||
+ (entry_ptr->flush_me_last &&
+ ((cache_ptr->num_last_entries >= cache_ptr->slist_len) ||
+ (flush_marked_entries && entry_ptr->flush_marker)))) &&
+ ( ( entry_ptr->flush_dep_nchildren == 0 ) ||
+ ( ( ! destroy ) &&
+ ( entry_ptr->flush_dep_ndirty_children == 0 ) ) ) &&
+ (entry_ptr->ring == ring)) {
+ if(entry_ptr->is_protected) {
+                        /* we probably have major problems -- but let's
+ * flush everything we can before we decide
+ * whether to flag an error.
+ */
+ tried_to_flush_protected_entry = TRUE;
+ protected_entries++;
+ } /* end if */
+ else {
#if H5C_DO_SANITY_CHECKS
- flushed_entries_count++;
- flushed_entries_size += (int64_t)entry_ptr->size;
- entry_size_change = 0;
+ flushed_entries_count++;
+ flushed_entries_size += (int64_t)entry_ptr->size;
+ entry_size_change = 0;
#endif /* H5C_DO_SANITY_CHECKS */
-
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flags, entry_size_change_ptr, NULL) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed.")
+ if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flags, entry_size_change_ptr, NULL) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
#if H5C_DO_SANITY_CHECKS
- /* it is possible that the entry size changed
- * during flush -- update flushed_entries_size
- * to account for this.
- */
- flushed_entries_size += entry_size_change;
+ /* it is possible that the entry size changed
+ * during flush -- update flushed_entries_size
+ * to account for this.
+ */
+ flushed_entries_size += entry_size_change;
#endif /* H5C_DO_SANITY_CHECKS */
- flushed_during_dep_loop = TRUE;
-
- if((cache_ptr->slist_change_in_serialize) ||
- (cache_ptr->slist_change_in_pre_serialize)) {
- /* The slist has been modified by something
- * other than the simple removal of the
- * of the flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_change_in_pre_serialize = FALSE;
- cache_ptr->slist_change_in_serialize = FALSE;
-
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
- } /* end if */
- } /* end if */
- else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
- /* This shouldn't happen -- if it does, just scream and die. */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
- } /* end else-if */
- else {
- /* Test to see if we are can flush the entry now.
- * If we can, go ahead and flush. Note that we
- * aren't trying to do a destroy here, so that
- * is not an issue.
+ if((cache_ptr->slist_change_in_serialize) ||
+ (cache_ptr->slist_change_in_pre_serialize)) {
+ /* The slist has been modified by something
+ * other than the simple removal of the
+                             * the flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
*/
- if(entry_ptr->flush_dep_height == curr_flush_dep_height) {
-#if H5C_DO_SANITY_CHECKS
- flushed_entries_count++;
- flushed_entries_size += (int64_t)entry_ptr->size;
- entry_size_change = 0;
-#endif /* H5C_DO_SANITY_CHECKS */
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flags, entry_size_change_ptr, NULL) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
-
-#if H5C_DO_SANITY_CHECKS
- /* it is possible that the entry size changed
- * during flush -- update flushed_entries_size
- * to account for this.
- */
- flushed_entries_size += entry_size_change;
-#endif /* H5C_DO_SANITY_CHECKS */
-
- flushed_during_dep_loop = TRUE;
-
- if((cache_ptr->slist_change_in_serialize) ||
- (cache_ptr->slist_change_in_pre_serialize)) {
- /* The slist has been modified by something
- * other than the simple removal of the
- * of the flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_change_in_pre_serialize = FALSE;
- cache_ptr->slist_change_in_serialize = FALSE;
-
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
- } /* end if */
- } /* end if */
- else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
- /* This shouldn't happen -- if it does, just scream and die. */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
- } /* end else */
- } /* end if */
- } /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */
-
- /* Check for incrementing flush dependency height */
- if(flushed_during_dep_loop) {
-
- /* If we flushed an entry at this flush dependency height
- * start over at the bottom level of the flush dependencies
- */
- curr_flush_dep_height = 0;
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
- /* Make certain we don't get stuck in an infinite loop */
- flush_dep_passes++;
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
+ } /* end if */
- /* Set flag for outer loop */
- flushed_entries_last_pass = TRUE;
+ flushed_entries_last_pass = TRUE;
+ } /* end else */
} /* end if */
- else
- curr_flush_dep_height++;
- } /* while ( curr_flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS) */
-
- passes++;
+ } /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */
#if H5C_DO_SANITY_CHECKS
/* Verify that the slist size and length are as expected. */
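
The bookkeeping described in the comments above boils down to one invariant per pass: entries flushed plus entries still on the skip list must equal the initial population plus whatever the serialize/pre-serialize callbacks inserted, with the same accounting for bytes once mid-flush resizes are folded in on both sides. A worked example with made-up numbers (the real checks are the HDassert() pairs on cache_ptr->slist_len / slist_size shown earlier in this diff):

    #include <assert.h>
    #include <stdint.h>

    /* A pass starts with 10 entries / 4096 bytes on the slist, flushes 7 of
     * them (3500 bytes pre-flush, one of which grows by 128 bytes during its
     * flush), while the flush callbacks insert 2 new dirty entries totalling
     * 768 bytes.
     */
    static void
    check_slist_accounting(void)
    {
        int64_t initial_len  = 10,  initial_size  = 4096;
        int64_t flushed_len  = 7,   flushed_size  = 3500 + 128; /* + entry_size_change  */
        int64_t len_increase = 2,   size_increase = 768 + 128;  /* inserts + the resize */
        int64_t final_len    = 10 - 7 + 2;                      /* 5 entries remain     */
        int64_t final_size   = 4096 + 128 - (3500 + 128) + 768; /* 1364 bytes remain    */

        assert(flushed_len  + final_len  == initial_len  + len_increase);  /* 12 == 12     */
        assert(flushed_size + final_size == initial_size + size_increase); /* 4992 == 4992 */
    }

The resize appears on both sides of the size check (in flushed_size via entry_size_change and in size_increase via the slist update), so it cancels out, which is why the scan can safely snapshot entry sizes before each flush.
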
@@ -7108,9 +6872,6 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
if(((cache_ptr->pl_len > 0) && (!ignore_protected)) || (tried_to_flush_protected_entry))
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "cache has protected items")
- if((cache_ptr->slist_len != 0) && (passes >= H5C__MAX_PASSES_ON_FLUSH))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush pass limit exceeded.")
-
#if H5C_DO_SANITY_CHECKS
if(!flush_marked_entries) {
HDassert(cache_ptr->slist_ring_len[ring] == 0);
@@ -7442,6 +7203,10 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr)
H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL)
+
+ /* verify that the entry is no longer part of any flush dependencies */
+ HDassert(entry_ptr->flush_dep_nparents == 0);
+ HDassert(entry_ptr->flush_dep_nchildren == 0);
}
else {
HDassert(clear_only || write_entry);
@@ -7471,6 +7236,16 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
if(entry_ptr->type->clear && (entry_ptr->type->clear)(f, (void *)entry_ptr, FALSE) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to clear entry")
+
+ /* Propagate the clean flag up the flush dependency chain if
+ * appropriate */
+ if(was_dirty) {
+ HDassert(entry_ptr->flush_dep_ndirty_children == 0);
+
+ if(entry_ptr->flush_dep_nparents > 0)
+ if(H5C__mark_flush_dep_clean(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep clean flag")
+ } /* end if */
}
/* reset the flush_in progress flag */
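
The clean-side propagation added above has a dirty-side mirror in H5C__mark_flush_dep_dirty(), introduced later in this patch: whenever an entry's dirty state flips, each immediate flush dependency parent has its flush_dep_ndirty_children counter adjusted by one. A minimal sketch of the paired transitions over a simplified entry type (the field names echo the patch, but the surrounding cache bookkeeping is omitted):

    /* Simplified stand-in for H5C_cache_entry_t: only the fields that the
     * dirty-count propagation touches.
     */
    struct entry {
        struct entry **parents;             /* flush_dep_parent          */
        unsigned       nparents;            /* flush_dep_nparents        */
        unsigned       ndirty_children;     /* flush_dep_ndirty_children */
        int            is_dirty;
    };

    static void
    mark_dirty(struct entry *e)             /* entry transitions clean -> dirty */
    {
        unsigned u;

        if(!e->is_dirty) {
            e->is_dirty = 1;
            for(u = 0; u < e->nparents; u++)
                e->parents[u]->ndirty_children++;
        }
    }

    static void
    mark_clean(struct entry *e)             /* entry written out by a flush */
    {
        unsigned u;

        if(e->is_dirty && e->ndirty_children == 0) {
            e->is_dirty = 0;
            for(u = 0; u < e->nparents; u++)
                e->parents[u]->ndirty_children--;
        }
    }

An entry with a nonzero dirty-child count of its own is never marked clean here, matching the HDassert above that a freshly cleaned entry has no dirty children left.
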
@@ -7646,7 +7421,6 @@ H5C_load_entry(H5F_t * f,
void * thing = NULL; /* Pointer to thing loaded */
H5C_cache_entry_t *entry = NULL; /* Alias for thing loaded, as cache entry */
size_t len; /* Size of image in file */
- unsigned u; /* Local index variable */
#ifdef H5_HAVE_PARALLEL
int mpi_rank = 0; /* MPI process rank */
MPI_Comm comm = MPI_COMM_NULL; /* File MPI Communicator */
@@ -7986,9 +7760,10 @@ H5C_load_entry(H5F_t * f,
/* Initialize flush dependency height fields */
entry->flush_dep_parent = NULL;
- for(u = 0; u < H5C__NUM_FLUSH_DEP_HEIGHTS; u++)
- entry->child_flush_dep_height_rc[u] = 0;
- entry->flush_dep_height = 0;
+ entry->flush_dep_nparents = 0;
+ entry->flush_dep_parent_nalloc = 0;
+ entry->flush_dep_nchildren = 0;
+ entry->flush_dep_ndirty_children = 0;
entry->ht_next = NULL;
entry->ht_prev = NULL;
@@ -8834,188 +8609,6 @@ H5C_entry_in_skip_list(H5C_t * cache_ptr, H5C_cache_entry_t *target_ptr)
/*-------------------------------------------------------------------------
*
- * Function: H5C_get_entry_ptr_from_addr()
- *
- * Purpose: Debugging function that attempts to look up an entry in the
- * cache by its file address, and if found, returns a pointer
- * to the entry in *entry_ptr_ptr. If the entry is not in the
- * cache, *entry_ptr_ptr is set to NULL.
- *
- * WARNING: This call should be used only in debugging
- * routines, and it should be avoided when
- * possible.
- *
- * Further, if we ever multi-thread the cache,
- * this routine will have to be either discarded
- * or heavily re-worked.
- *
- * Finally, keep in mind that the entry whose
- * pointer is obtained in this fashion may not
- * be in a stable state.
- *
- * Note that this function is only defined if NDEBUG
- * is not defined.
- *
- * As heavy use of this function is almost certainly a
- * bad idea, the metadata cache tracks the number of
- * successful calls to this function, and (if
- * H5C_DO_SANITY_CHECKS is defined) displays any
- * non-zero count on cache shutdown.
- *
- * Return: FAIL if error is detected, SUCCEED otherwise.
- *
- * Programmer: John Mainzer, 5/30/14
- *
- * Changes:
- *
- * None.
- *
- *-------------------------------------------------------------------------
- */
-#ifndef NDEBUG
-herr_t
-H5C_get_entry_ptr_from_addr(const H5F_t *f,
- haddr_t addr,
- void ** entry_ptr_ptr)
-{
- H5C_t * cache_ptr;
- H5C_cache_entry_t * entry_ptr = NULL;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( f );
- HDassert( f->shared );
-
- cache_ptr = f->shared->cache;
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- HDassert( H5F_addr_defined(addr) );
- HDassert( entry_ptr_ptr != NULL );
-
- /* this test duplicates two of the above asserts, but we need an
- * invocation of HGOTO_ERROR to keep the compiler happy.
- */
- if ( ( cache_ptr == NULL ) || ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.")
- }
-
- H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
-
- if ( entry_ptr == NULL ) {
-
- /* the entry doesn't exist in the cache -- report this
- * and quit.
- */
- *entry_ptr_ptr = NULL;
-
- } else {
-
- *entry_ptr_ptr = entry_ptr;
-
- /* increment call counter */
- (cache_ptr->get_entry_ptr_from_addr_counter)++;
- }
-
-done:
-
- FUNC_LEAVE_NOAPI(ret_value)
-
-} /* H5C_get_entry_ptr_from_addr() */
-
-#endif /* NDEBUG */
-
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C_verify_entry_type()
- *
- * Purpose: Debugging function that attempts to look up an entry in the
- * cache by its file address, and if found, test to see if its
- * type field contains the expted value.
- *
- * If the specified entry is in cache, *in_cache_ptr is set
- * to TRUE, and *type_ok_ptr is set to TRUE or FALSE
- * depending on whether the entries type field matches the
- * expected_type parameter
- *
- * If the target entry is not in cache, *in_cache_ptr is
- * set to FALSE, and *type_ok_ptr is undefined.
- *
- * Note that this function is only defined if NDEBUG
- * is not defined.
- *
- * Return: FAIL if error is detected, SUCCEED otherwise.
- *
- * Programmer: John Mainzer, 5/30/14
- *
- * Changes:
- *
- * None.
- *
- *-------------------------------------------------------------------------
- */
-#ifndef NDEBUG
-herr_t
-H5C_verify_entry_type(const H5F_t *f,
- haddr_t addr,
- const H5C_class_t * expected_type,
- hbool_t * in_cache_ptr,
- hbool_t * type_ok_ptr)
-{
- H5C_t * cache_ptr;
- H5C_cache_entry_t * entry_ptr = NULL;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert( f );
- HDassert( f->shared );
-
- cache_ptr = f->shared->cache;
-
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- HDassert( H5F_addr_defined(addr) );
- HDassert( in_cache_ptr != NULL );
- HDassert( type_ok_ptr != NULL );
-
- /* this test duplicates two of the above asserts, but we need an
- * invocation of HGOTO_ERROR to keep the compiler happy.
- */
- if ( ( cache_ptr == NULL ) || ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.")
- }
-
- H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
-
- if ( entry_ptr == NULL ) {
-
- /* the entry doesn't exist in the cache -- report this
- * and quit.
- */
- *in_cache_ptr = FALSE;
-
- } else {
-
- *in_cache_ptr = TRUE;
- *type_ok_ptr = (expected_type == entry_ptr->type);
- }
-
-done:
-
- FUNC_LEAVE_NOAPI(ret_value)
-
-} /* H5C_verify_entry_type() */
-
-#endif /* NDEBUG */
-
-
-/*-------------------------------------------------------------------------
- *
* Function: H5C__flush_marked_entries
*
* Purpose: Flushes all marked entries in the cache.
@@ -9145,6 +8738,122 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5C__mark_flush_dep_dirty()
+ *
+ * Purpose:     Propagate the flush_dep_ndirty_children count
+ *              up the dependency chain in response to an entry either
+ * becoming dirty or having its flush_dep_ndirty_children
+ * increased from 0.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * 11/13/12
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__mark_flush_dep_dirty(H5C_cache_entry_t * entry)
+{
+ unsigned u; /* Local index variable */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(entry);
+
+ /* Iterate over the parent entries, if any */
+ for(u = 0; u < entry->flush_dep_nparents; u++) {
+ /* Sanity check */
+ HDassert(entry->flush_dep_parent[u]->flush_dep_ndirty_children < entry->flush_dep_parent[u]->flush_dep_nchildren);
+
+ /* Adjust the parent's number of dirty children */
+ entry->flush_dep_parent[u]->flush_dep_ndirty_children++;
+ } /* end for */
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5C__mark_flush_dep_dirty() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__mark_flush_dep_clean()
+ *
+ * Purpose:     Propagate the flush_dep_ndirty_children count
+ *              up the dependency chain in response to an entry either
+ * becoming clean or having its flush_dep_ndirty_children
+ * reduced to 0.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * 11/13/12
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__mark_flush_dep_clean(H5C_cache_entry_t * entry)
+{
+ unsigned u; /* Local index variable */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(entry);
+
+ /* Iterate over the parent entries, if any */
+ for(u = 0; u < entry->flush_dep_nparents; u++) {
+ /* Sanity check */
+ HDassert(entry->flush_dep_parent[u]->flush_dep_ndirty_children > 0);
+
+ /* Adjust the parent's number of dirty children */
+ entry->flush_dep_parent[u]->flush_dep_ndirty_children--;
+ } /* end for */
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5C__mark_flush_dep_clean() */
+
+#ifndef NDEBUG
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__assert_flush_dep_nocycle()
+ *
+ * Purpose: Assert recursively that base_entry is not the same as
+ * entry, and perform the same assertion on all of entry's
+ * flush dependency parents. This is used to detect cycles
+ * created by flush dependencies.
+ *
+ * Return: void
+ *
+ * Programmer: Neil Fortner
+ * 12/10/12
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t * entry,
+ const H5C_cache_entry_t * base_entry)
+{
+ unsigned u; /* Local index variable */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(entry);
+ HDassert(base_entry);
+
+ /* Make sure the entries are not the same */
+ HDassert(base_entry != entry);
+
+ /* Iterate over entry's parents (if any) */
+ for(u = 0; u < entry->flush_dep_nparents; u++)
+ H5C__assert_flush_dep_nocycle(entry->flush_dep_parent[u], base_entry);
+
+ FUNC_LEAVE_NOAPI_VOID
+} /* H5C__assert_flush_dep_nocycle() */
+#endif /* NDEBUG */
+
+
+/*-------------------------------------------------------------------------
* Function: H5C__generate_image
*
* Purpose: Serialize an entry and generate its image.
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index fe2be2f..cf2da3e 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -45,22 +45,13 @@
/* Package Private Macros */
/**************************/
-/* With the introduction of the fractal heap, it is now possible for
- * entries to be dirtied, resized, and/or moved in the flush callbacks.
- * As a result, on flushes, it may be necessary to make multiple passes
- * through the slist before it is empty. The H5C__MAX_PASSES_ON_FLUSH
- * #define is used to set an upper limit on the number of passes.
- * The current value was obtained via personal communication with
- * Quincey. I have applied a fudge factor of 2.
- *
- * -- JRM
- */
-#define H5C__MAX_PASSES_ON_FLUSH 4
-
/* Cache configuration settings */
#define H5C__HASH_TABLE_LEN (64 * 1024) /* must be a power of 2 */
#define H5C__H5C_T_MAGIC 0x005CAC0E
+/* Initial allocated size of the "flush_dep_parent" array */
+#define H5C_FLUSH_DEP_PARENT_INIT 8
+
/* Cache client ID for epoch markers */
/* Note that H5C__MAX_EPOCH_MARKERS is defined in H5Cprivate.h, not here because
* it is needed to dimension arrays in H5C_t.
@@ -4063,11 +4054,6 @@ if ( ( (entry_ptr) == NULL ) || \
 * field is intended to allow marking of output with
 * the process's mpi rank.
*
- * get_entry_ptr_from_addr_counter: Counter used to track the number of
- * times the H5C_get_entry_ptr_from_addr() function has been
- * called successfully. This field is only defined when
- * NDEBUG is not #defined.
- *
****************************************************************************/
struct H5C_t {
uint32_t magic;
@@ -4252,10 +4238,6 @@ struct H5C_t {
#endif /* H5C_COLLECT_CACHE_STATS */
char prefix[H5C__PREFIX_LEN];
-
-#ifndef NDEBUG
- int64_t get_entry_ptr_from_addr_counter;
-#endif /* NDEBUG */
};
#ifdef H5_HAVE_PARALLEL
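
H5C_FLUSH_DEP_PARENT_INIT sizes the first allocation of the per-entry flush_dep_parent array now that an entry can have several flush dependency parents. The sketch below shows the usual append-and-grow pattern such a constant implies; the doubling policy and the helper name are assumptions for illustration, not code from this patch:

    #include <stdlib.h>

    #define FLUSH_DEP_PARENT_INIT 8         /* mirrors H5C_FLUSH_DEP_PARENT_INIT */

    struct entry;                           /* stand-in for H5C_cache_entry_t    */

    struct child {
        struct entry **parents;             /* flush_dep_parent                  */
        unsigned       nparents;            /* flush_dep_nparents                */
        unsigned       nalloc;              /* flush_dep_parent_nalloc           */
    };

    /* Append one parent, allocating 8 slots up front and doubling afterwards
     * (the doubling is an assumption about the growth policy).
     */
    static int
    append_parent(struct child *c, struct entry *parent)
    {
        if(c->nparents == c->nalloc) {
            unsigned       new_nalloc = c->nalloc ? 2 * c->nalloc : FLUSH_DEP_PARENT_INIT;
            struct entry **tmp = (struct entry **)realloc(c->parents,
                                                          new_nalloc * sizeof(*tmp));

            if(NULL == tmp)
                return -1;
            c->parents = tmp;
            c->nalloc  = new_nalloc;
        }

        c->parents[c->nparents++] = parent;
        return 0;
    } /* end append_parent() */
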
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index 8f88ebd..4137edf 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -97,14 +97,6 @@
#define H5C__DEFAULT_MAX_CACHE_SIZE ((size_t)(4 * 1024 * 1024))
#define H5C__DEFAULT_MIN_CLEAN_SIZE ((size_t)(2 * 1024 * 1024))
-/* Maximum height of flush dependency relationships between entries. This is
- * currently tuned to the extensible array (H5EA) data structure, which only
- * requires 6 levels of dependency (i.e. heights 0-6) (actually, the extensible
- * array needs 4 levels, plus another 2 levels are needed: one for the layer
- * under the extensible array and one for the layer above it).
- */
-#define H5C__NUM_FLUSH_DEP_HEIGHTS 6
-
/* Values for cache entry magic field */
#define H5C__H5C_CACHE_ENTRY_T_MAGIC 0x005CAC0A
#define H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC 0xDeadBeef
@@ -209,6 +201,7 @@
#define H5C__TAKE_OWNERSHIP_FLAG 0x0800
#define H5C__FLUSH_LAST_FLAG 0x1000
#define H5C__FLUSH_COLLECTIVELY_FLAG 0x2000
+#define H5C__EVICT_ALLOW_LAST_PINS_FLAG 0x4000
#define H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG 0x8000
/* Definitions for cache "tag" property */
@@ -1473,33 +1466,29 @@ typedef int H5C_ring_t;
*
* Fields supporting the 'flush dependency' feature:
*
- * Entries in the cache may have a 'flush dependency' on another entry in the
+ * Entries in the cache may have 'flush dependencies' on other entries in the
* cache. A flush dependency requires that all dirty child entries be flushed
* to the file before a dirty parent entry (of those child entries) can be
* flushed to the file. This can be used by cache clients to create data
* structures that allow Single-Writer/Multiple-Reader (SWMR) access for the
* data structure.
*
- * The leaf child entry will have a "height" of 0, with any parent entries
- * having a height of 1 greater than the maximum height of any of their child
- * entries (flush dependencies are allowed to create asymmetric trees of
- * relationships).
- *
- * flush_dep_parent: Pointer to the parent entry for an entry in a flush
- * dependency relationship.
+ * flush_dep_parent: Pointer to the array of flush dependency parent entries
+ * for this entry.
*
- * child_flush_dep_height_rc: An array of reference counts for child entries,
- * where the number of children of each height is tracked.
+ * flush_dep_nparents: Number of flush dependency parent entries for this
+ * entry, i.e. the number of valid elements in flush_dep_parent.
*
- * flush_dep_height: The height of the entry, which is one greater than the
- * maximum height of any of its child entries..
+ * flush_dep_parent_nalloc: The number of allocated elements in
+ *              the flush_dep_parent array.
*
- * pinned_from_client: Whether the entry was pinned by an explicit pin request
- * from a cache client.
+ * flush_dep_nchildren: Number of flush dependency children for this entry. If
+ * this field is nonzero, then this entry must be pinned and
+ * therefore cannot be evicted.
*
- * pinned_from_cache: Whether the entry was pinned implicitly as a
- * request of being a parent entry in a flush dependency
- * relationship.
+ * flush_dep_ndirty_children: Number of flush dependency children that are
+ * either dirty or have a nonzero flush_dep_ndirty_children. If
+ * this field is nonzero, then this entry cannot be flushed.
*
*
* Fields supporting the hash table:
@@ -1644,9 +1633,11 @@ typedef struct H5C_cache_entry_t {
H5C_ring_t ring;
/* fields supporting the 'flush dependency' feature: */
- struct H5C_cache_entry_t * flush_dep_parent;
- uint64_t child_flush_dep_height_rc[H5C__NUM_FLUSH_DEP_HEIGHTS];
- unsigned flush_dep_height;
+ struct H5C_cache_entry_t ** flush_dep_parent;
+ unsigned flush_dep_nparents;
+ unsigned flush_dep_parent_nalloc;
+ unsigned flush_dep_nchildren;
+ unsigned flush_dep_ndirty_children;
hbool_t pinned_from_client;
hbool_t pinned_from_cache;
@@ -2041,13 +2032,5 @@ H5_DLL herr_t H5C_mark_entries_as_clean(H5F_t *f, hid_t dxpl_id, int32_t ce_arra
haddr_t *ce_array_ptr);
#endif /* H5_HAVE_PARALLEL */
-#ifndef NDEBUG /* debugging functions */
-H5_DLL herr_t H5C_get_entry_ptr_from_addr(const H5F_t *f, haddr_t addr,
- void **entry_ptr_ptr);
-H5_DLL herr_t H5C_verify_entry_type(const H5F_t *f, haddr_t addr,
- const H5C_class_t *expected_type, hbool_t *in_cache_ptr,
- hbool_t *type_ok_ptr);
-#endif /* NDEBUG */
-
#endif /* !_H5Cprivate_H */
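
The field documentation above encodes two rules that the rest of the patch relies on: an entry with flush dependency children stays pinned and therefore cannot be evicted, and an entry whose flush_dep_ndirty_children is nonzero cannot be flushed until those children are written out. Read as predicates over a simplified entry (these helpers illustrate the documented invariants and are not functions in H5C):

    #include <stdbool.h>

    /* Simplified view of the new flush dependency fields */
    struct entry {
        unsigned flush_dep_nparents;
        unsigned flush_dep_nchildren;
        unsigned flush_dep_ndirty_children;
        bool     pinned_from_client;        /* explicit pin by a cache client */
    };

    /* An entry with children must remain pinned, so eviction requires both
     * no children and no other reason to stay pinned.
     */
    static bool
    can_evict(const struct entry *e)
    {
        return e->flush_dep_nchildren == 0 && !e->pinned_from_client;
    }

    /* An entry with dirty (or transitively dirty) children cannot be flushed
     * before those children.
     */
    static bool
    can_flush(const struct entry *e)
    {
        return e->flush_dep_ndirty_children == 0;
    }

    /* The H5C_get_entry_status() change in H5Cquery.c reports status the same
     * way: children make an entry a flush dependency parent, parents make it
     * a flush dependency child.
     */
    static bool is_fd_parent(const struct entry *e) { return e->flush_dep_nchildren > 0; }
    static bool is_fd_child (const struct entry *e) { return e->flush_dep_nparents  > 0; }
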
diff --git a/src/H5Cquery.c b/src/H5Cquery.c
index a693ec2..874b12f 100644
--- a/src/H5Cquery.c
+++ b/src/H5Cquery.c
@@ -277,9 +277,9 @@ H5C_get_entry_status(const H5F_t *f,
if(is_corked_ptr != NULL)
*is_corked_ptr = entry_ptr->is_corked;
if(is_flush_dep_parent_ptr != NULL)
- *is_flush_dep_parent_ptr = (entry_ptr->flush_dep_height > 0);
+ *is_flush_dep_parent_ptr = (entry_ptr->flush_dep_nchildren > 0);
if(is_flush_dep_child_ptr != NULL)
- *is_flush_dep_child_ptr = (entry_ptr->flush_dep_parent != NULL);
+ *is_flush_dep_child_ptr = (entry_ptr->flush_dep_nparents > 0);
} /* end else */
done:
diff --git a/src/H5EA.c b/src/H5EA.c
index 756eb93..350acc1 100644
--- a/src/H5EA.c
+++ b/src/H5EA.c
@@ -74,6 +74,11 @@ typedef herr_t (*H5EA__unprotect_func_t)(void *thing, hid_t dxpl_id,
/* Local Prototypes */
/********************/
+static herr_t
+H5EA__lookup_elmt(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx, hbool_t will_extend,
+ unsigned thing_acc, void **thing, uint8_t **thing_elmt_buf,
+ hsize_t *thing_elmt_idx, H5EA__unprotect_func_t *thing_unprot_func);
+
/*********************/
/* Package Variables */
@@ -133,10 +138,6 @@ H5EA_create(H5F_t *f, hid_t dxpl_id, const H5EA_create_t *cparam, void *ctx_udat
H5EA_hdr_t *hdr = NULL; /* The extensible array header information */
haddr_t ea_addr; /* Array header address */
-#ifdef QAK
-HDfprintf(stderr, "%s: Called\n", FUNC);
-#endif /* QAK */
-
/*
* Check arguments.
*/
@@ -213,9 +214,6 @@ H5EA_open(H5F_t *f, hid_t dxpl_id, haddr_t ea_addr, void *ctx_udata))
HDassert(H5F_addr_defined(ea_addr));
/* Load the array header into memory */
-#ifdef QAK
-HDfprintf(stderr, "%s: ea_addr = %a\n", FUNC, ea_addr);
-#endif /* QAK */
if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, ea_addr, ctx_udata, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to load extensible array header, address = %llu", (unsigned long long)ea_addr)
@@ -272,10 +270,6 @@ H5EA_get_nelmts(const H5EA_t *ea, hsize_t *nelmts))
/* Local variables */
-#ifdef QAK
-HDfprintf(stderr, "%s: Called\n", FUNC);
-#endif /* QAK */
-
/*
* Check arguments.
*/
@@ -307,10 +301,6 @@ H5EA_get_addr(const H5EA_t *ea, haddr_t *addr))
/* Local variables */
-#ifdef QAK
-HDfprintf(stderr, "%s: Called\n", FUNC);
-#endif /* QAK */
-
/*
* Check arguments.
*/
@@ -325,7 +315,7 @@ END_FUNC(PRIV) /* end H5EA_get_addr() */
/*-------------------------------------------------------------------------
- * Function: H5EA_lookup_elmt
+ * Function: H5EA__lookup_elmt
*
* Purpose: Retrieve the metadata object and the element buffer for a
* given element in the array.
@@ -340,9 +330,9 @@ END_FUNC(PRIV) /* end H5EA_get_addr() */
*/
BEGIN_FUNC(STATIC, ERR,
herr_t, SUCCEED, FAIL,
-H5EA__lookup_elmt(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx, unsigned thing_acc,
- void **thing, uint8_t **thing_elmt_buf, hsize_t *thing_elmt_idx,
- H5EA__unprotect_func_t *thing_unprot_func))
+H5EA__lookup_elmt(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx, hbool_t will_extend,
+ unsigned thing_acc, void **thing, uint8_t **thing_elmt_buf,
+ hsize_t *thing_elmt_idx, H5EA__unprotect_func_t *thing_unprot_func))
/* Local variables */
H5EA_hdr_t *hdr = ea->hdr; /* Header for EA */
@@ -355,11 +345,6 @@ H5EA__lookup_elmt(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx, unsigned thing_a
hbool_t stats_changed = FALSE; /* Whether array statistics changed */
hbool_t hdr_dirty = FALSE; /* Whether the array header changed */
-#ifdef QAK
-HDfprintf(stderr, "%s: Called\n", FUNC);
-HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
-#endif /* QAK */
-
/*
* Check arguments.
*/
@@ -383,9 +368,6 @@ HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
/* Check if we should create the index block */
if(!H5F_addr_defined(hdr->idx_blk_addr)) {
-#ifdef QAK
-HDfprintf(stderr, "%s: Index block address not defined!\n", FUNC, idx);
-#endif /* QAK */
/* Check if we are allowed to create the thing */
if(0 == (thing_acc & H5AC__READ_ONLY_FLAG)) { /* i.e. r/w access */
/* Create the index block */
@@ -397,9 +379,6 @@ HDfprintf(stderr, "%s: Index block address not defined!\n", FUNC, idx);
else
H5_LEAVE(SUCCEED)
} /* end if */
-#ifdef QAK
-HDfprintf(stderr, "%s: Index block address is: %a\n", FUNC, hdr->idx_blk_addr);
-#endif /* QAK */
/* Protect index block */
if(NULL == (iblock = H5EA__iblock_protect(hdr, dxpl_id, thing_acc)))
@@ -420,26 +399,14 @@ HDfprintf(stderr, "%s: Index block address is: %a\n", FUNC, hdr->idx_blk_addr);
/* Get super block index where element is located */
sblk_idx = H5EA__dblock_sblk_idx(hdr, idx);
-#ifdef QAK
-HDfprintf(stderr, "%s: sblk_idx = %u, iblock->nsblks = %Zu\n", FUNC, sblk_idx, iblock->nsblks);
-#endif /* QAK */
/* Adjust index to offset in super block */
elmt_idx = idx - (hdr->cparam.idx_blk_elmts + hdr->sblk_info[sblk_idx].start_idx);
-#ifdef QAK
-HDfprintf(stderr, "%s: after adjusting for super block elements, elmt_idx = %Hu\n", FUNC, elmt_idx);
-#endif /* QAK */
/* Check for data block containing element address in the index block */
if(sblk_idx < iblock->nsblks) {
-#ifdef QAK
-HDfprintf(stderr, "%s: Element in data block pointed to by address in index block\n", FUNC);
-#endif /* QAK */
/* Compute the data block index in index block */
dblk_idx = (size_t)(hdr->sblk_info[sblk_idx].start_dblk + (elmt_idx / hdr->sblk_info[sblk_idx].dblk_nelmts));
-#ifdef QAK
-HDfprintf(stderr, "%s: dblk_idx = %u, iblock->ndblk_addrs = %Zu\n", FUNC, dblk_idx, iblock->ndblk_addrs);
-#endif /* QAK */
HDassert(dblk_idx < iblock->ndblk_addrs);
/* Check if the data block has been allocated on disk yet */
@@ -470,6 +437,13 @@ HDfprintf(stderr, "%s: dblk_idx = %u, iblock->ndblk_addrs = %Zu\n", FUNC, dblk_i
/* Adjust index to offset in data block */
elmt_idx %= hdr->sblk_info[sblk_idx].dblk_nelmts;
+            /* Create a flush dependency on the header, if extending the array and one doesn't already exist */
+ if(will_extend && !dblock->has_hdr_depend) {
+ if(H5EA__create_flush_depend((H5AC_info_t *)hdr, (H5AC_info_t *)dblock) < 0)
+ H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency between data block and header, index = %llu", (unsigned long long)idx)
+ dblock->has_hdr_depend = TRUE;
+ } /* end if */
+
/* Set 'thing' info to refer to the data block */
*thing = dblock;
*thing_elmt_buf = (uint8_t *)dblock->elmts;
@@ -490,9 +464,6 @@ HDfprintf(stderr, "%s: dblk_idx = %u, iblock->ndblk_addrs = %Zu\n", FUNC, dblk_i
/* Create super block */
sblk_addr = H5EA__sblock_create(hdr, dxpl_id, iblock, &stats_changed, sblk_idx);
-#ifdef QAK
-HDfprintf(stderr, "%s: New super block address is: %a\n", FUNC, sblk_addr);
-#endif /* QAK */
if(!H5F_addr_defined(sblk_addr))
H5E_THROW(H5E_CANTCREATE, "unable to create extensible array super block")
@@ -510,9 +481,6 @@ HDfprintf(stderr, "%s: New super block address is: %a\n", FUNC, sblk_addr);
/* Compute the data block index in super block */
dblk_idx = (size_t)(elmt_idx / sblock->dblk_nelmts);
-#ifdef QAK
-HDfprintf(stderr, "%s: dblk_idx = %u, sblock->ndblks = %Zu\n", FUNC, dblk_idx, sblock->ndblks);
-#endif /* QAK */
HDassert(dblk_idx < sblock->ndblks);
/* Check if the data block has been allocated on disk yet */
@@ -531,21 +499,20 @@ HDfprintf(stderr, "%s: dblk_idx = %u, sblock->ndblks = %Zu\n", FUNC, dblk_idx, s
/* Set data block address in index block */
sblock->dblk_addrs[dblk_idx] = dblk_addr;
sblock_cache_flags |= H5AC__DIRTIED_FLAG;
+
+ /* Create flush dependency on header, if extending the array and one doesn't already exist */
+ if(will_extend && !sblock->has_hdr_depend) {
+ if(H5EA__create_flush_depend((H5AC_info_t *)sblock->hdr, (H5AC_info_t *)sblock) < 0)
+ H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency between super block and header, address = %llu", (unsigned long long)sblock->addr)
+ sblock->has_hdr_depend = TRUE;
+ } /* end if */
} /* end if */
else
H5_LEAVE(SUCCEED)
} /* end if */
-#ifdef QAK
-if(sblock->dblk_npages)
- HDfprintf(stderr, "%s: Check 1.0: elmt_idx = %Hu\n", FUNC, elmt_idx);
-#endif /* QAK */
/* Adjust index to offset in data block */
elmt_idx %= sblock->dblk_nelmts;
-#ifdef QAK
-if(sblock->dblk_npages)
- HDfprintf(stderr, "%s: Check 2.0: elmt_idx = %Hu\n", FUNC, elmt_idx);
-#endif /* QAK */
/* Check if the data block is paged */
if(sblock->dblk_npages) {
@@ -566,14 +533,6 @@ if(sblock->dblk_npages)
dblk_page_addr = sblock->dblk_addrs[dblk_idx] +
H5EA_DBLOCK_PREFIX_SIZE(sblock) +
(page_idx * sblock->dblk_page_size);
-#ifdef QAK
-HDfprintf(stderr, "%s: sblock->addr = %a\n", FUNC, sblock->addr);
-HDfprintf(stderr, "%s: sblock->dblk_addrs[%Zu] = %a\n", FUNC, dblk_idx, sblock->dblk_addrs[dblk_idx]);
-HDfprintf(stderr, "%s: H5EA_DBLOCK_PREFIX_SIZE(sblock) = %u\n", FUNC, (unsigned)H5EA_DBLOCK_PREFIX_SIZE(sblock));
-HDfprintf(stderr, "%s: sblock->page_init[%Zu] = %t\n", FUNC, page_init_idx, H5VM_bit_get(sblock->page_init, page_init_idx));
-HDfprintf(stderr, "%s: page_idx = %Zu, elmt_idx = %Hu, dblk_page_addr = %a\n", FUNC, page_idx, elmt_idx, dblk_page_addr);
-HDfprintf(stderr, "%s: sblock->dblk_page_size = %Zu\n", FUNC, sblock->dblk_page_size);
-#endif /* QAK */
/* Check if page has been initialized yet */
if(!H5VM_bit_get(sblock->page_init, page_init_idx)) {
@@ -595,6 +554,13 @@ HDfprintf(stderr, "%s: sblock->dblk_page_size = %Zu\n", FUNC, sblock->dblk_page_
if(NULL == (dblk_page = H5EA__dblk_page_protect(hdr, dxpl_id, sblock, dblk_page_addr, thing_acc)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array data block page, address = %llu", (unsigned long long)dblk_page_addr)
+                /* Create a flush dependency on the header, if extending the array and one doesn't already exist */
+ if(will_extend && !dblk_page->has_hdr_depend) {
+ if(H5EA__create_flush_depend((H5AC_info_t *)hdr, (H5AC_info_t *)dblk_page) < 0)
+ H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency between data block page and header, index = %llu", (unsigned long long)idx)
+ dblk_page->has_hdr_depend = TRUE;
+ } /* end if */
+
/* Set 'thing' info to refer to the data block page */
*thing = dblk_page;
*thing_elmt_buf = (uint8_t *)dblk_page->elmts;
@@ -606,6 +572,13 @@ HDfprintf(stderr, "%s: sblock->dblk_page_size = %Zu\n", FUNC, sblock->dblk_page_
if(NULL == (dblock = H5EA__dblock_protect(hdr, dxpl_id, sblock, sblock->dblk_addrs[dblk_idx], sblock->dblk_nelmts, thing_acc)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array data block, address = %llu", (unsigned long long)sblock->dblk_addrs[dblk_idx])
+                /* Create a flush dependency on the header, if extending the array and one doesn't already exist */
+ if(will_extend && !dblock->has_hdr_depend) {
+ if(H5EA__create_flush_depend((H5AC_info_t *)hdr, (H5AC_info_t *)dblock) < 0)
+ H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency between data block and header, index = %llu", (unsigned long long)idx)
+ dblock->has_hdr_depend = TRUE;
+ } /* end if */
+
/* Set 'thing' info to refer to the data block */
*thing = dblock;
*thing_elmt_buf = (uint8_t *)dblock->elmts;
@@ -615,6 +588,10 @@ HDfprintf(stderr, "%s: sblock->dblk_page_size = %Zu\n", FUNC, sblock->dblk_page_
} /* end else */
} /* end else */
+ /* Sanity checks */
+ HDassert(*thing != NULL);
+ HDassert(*thing_unprot_func != NULL);
+
CATCH
/* Reset 'thing' info on error */
if(ret_value < 0) {
@@ -644,7 +621,7 @@ CATCH
if(dblk_page && *thing != dblk_page && H5EA__dblk_page_unprotect(dblk_page, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
H5E_THROW(H5E_CANTUNPROTECT, "unable to release extensible array data block page")
-END_FUNC(STATIC) /* end H5EA_lookup_elmt() */
+END_FUNC(STATIC) /* end H5EA__lookup_elmt() */
/*-------------------------------------------------------------------------
@@ -670,13 +647,9 @@ H5EA_set(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx, const void *elmt))
uint8_t *thing_elmt_buf; /* Pointer to the element buffer for the array metadata */
hsize_t thing_elmt_idx; /* Index of the element in the element buffer for the array metadata */
H5EA__unprotect_func_t thing_unprot_func; /* Function pointer for unprotecting the array metadata */
+ hbool_t will_extend; /* Flag indicating if setting the element will extend the array */
unsigned thing_cache_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting array metadata */
-#ifdef QAK
-HDfprintf(stderr, "%s: Called\n", FUNC);
-HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
-#endif /* QAK */
-
/*
* Check arguments.
*/
@@ -687,7 +660,8 @@ HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
hdr->f = ea->f;
/* Look up the array metadata containing the element we want to set */
- if(H5EA__lookup_elmt(ea, dxpl_id, idx, H5AC__NO_FLAGS_SET, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0)
+ will_extend = (idx >= hdr->stats.stored.max_idx_set);
+ if(H5EA__lookup_elmt(ea, dxpl_id, idx, will_extend, H5AC__NO_FLAGS_SET, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0)
H5E_THROW(H5E_CANTPROTECT, "unable to protect array metadata")
/* Sanity check */
@@ -700,10 +674,7 @@ HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
thing_cache_flags |= H5AC__DIRTIED_FLAG;
/* Update max. element set in array, if appropriate */
-#ifdef QAK
-HDfprintf(stderr, "%s: idx = %Hu, hdr->stats.max_idx_set = %Hu\n", FUNC, idx, hdr->stats.max_idx_set);
-#endif /* QAK */
- if(idx >= hdr->stats.stored.max_idx_set) {
+ if(will_extend) {
/* Update the max index for the array */
hdr->stats.stored.max_idx_set = idx + 1;
if(H5EA__hdr_modified(hdr) < 0)
@@ -740,11 +711,6 @@ H5EA_get(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx, void *elmt))
void *thing = NULL; /* Pointer to the array metadata containing the array index we are interested in */
H5EA__unprotect_func_t thing_unprot_func; /* Function pointer for unprotecting the array metadata */
-#ifdef QAK
-HDfprintf(stderr, "%s: Called\n", FUNC);
-HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
-#endif /* QAK */
-
/*
* Check arguments.
*/
@@ -753,9 +719,6 @@ HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
/* Check for element beyond max. element in array */
if(idx >= hdr->stats.stored.max_idx_set) {
-#ifdef QAK
-HDfprintf(stderr, "%s: Element beyond max. index set, hdr->stats.max_idx_set = %Hu, idx = %Hu\n", FUNC, hdr->stats.max_idx_set, idx);
-#endif /* QAK */
/* Call the class's 'fill' callback */
if((hdr->cparam.cls->fill)(elmt, (size_t)1) < 0)
H5E_THROW(H5E_CANTSET, "can't set element to class's fill value")
@@ -764,15 +727,11 @@ HDfprintf(stderr, "%s: Element beyond max. index set, hdr->stats.max_idx_set = %
uint8_t *thing_elmt_buf; /* Pointer to the element buffer for the array metadata */
hsize_t thing_elmt_idx; /* Index of the element in the element buffer for the array metadata */
-#ifdef QAK
-HDfprintf(stderr, "%s: Index block address is: %a\n", FUNC, hdr->idx_blk_addr);
-#endif /* QAK */
-
/* Set the shared array header's file context for this operation */
hdr->f = ea->f;
/* Look up the array metadata containing the element we want to set */
- if(H5EA__lookup_elmt(ea, dxpl_id, idx, H5AC__READ_ONLY_FLAG, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0)
+ if(H5EA__lookup_elmt(ea, dxpl_id, idx, FALSE, H5AC__READ_ONLY_FLAG, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0)
H5E_THROW(H5E_CANTPROTECT, "unable to protect array metadata")
/* Check if the thing holding the element has been created yet */
@@ -815,10 +774,6 @@ H5EA_depend(H5AC_info_t *parent_entry, H5EA_t *ea))
/* Local variables */
H5EA_hdr_t *hdr = ea->hdr; /* Header for EA */
-#ifdef QAK
-HDfprintf(stderr, "%s: Called\n", FUNC);
-#endif /* QAK */
-
/*
* Check arguments.
*/
@@ -838,49 +793,6 @@ END_FUNC(PRIV) /* end H5EA_depend() */
/*-------------------------------------------------------------------------
- * Function: H5EA_undepend
- *
- * Purpose: Remove a child flush dependency between the extensible array's
- * header and another piece of metadata in the file.
- *
- * Return: SUCCEED/FAIL
- *
- * Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * May 27 2009
- *
- *-------------------------------------------------------------------------
- */
-BEGIN_FUNC(PRIV, ERR,
-herr_t, SUCCEED, FAIL,
-H5EA_undepend(H5AC_info_t *parent_entry, H5EA_t *ea))
-
- /* Local variables */
- H5EA_hdr_t *hdr = ea->hdr; /* Header for EA */
-
-#ifdef QAK
-HDfprintf(stderr, "%s: Called\n", FUNC);
-#endif /* QAK */
-
- /*
- * Check arguments.
- */
- HDassert(ea);
- HDassert(hdr);
-
- /* Set the shared array header's file context for this operation */
- hdr->f = ea->f;
-
- /* Remove flush dependency between parent entry and extensible array header */
- if(H5EA__destroy_flush_depend(parent_entry, (H5AC_info_t *)hdr) < 0)
- H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency on file metadata")
-
-CATCH
-
-END_FUNC(PRIV) /* end H5EA_undepend() */
-
-
-/*-------------------------------------------------------------------------
* Function: H5EA_close
*
* Purpose: Close an extensible array
@@ -901,10 +813,6 @@ H5EA_close(H5EA_t *ea, hid_t dxpl_id))
hbool_t pending_delete = FALSE; /* Whether the array is pending deletion */
haddr_t ea_addr = HADDR_UNDEF; /* Address of array (for deletion) */
-#ifdef QAK
-HDfprintf(stderr, "%s: Called\n", FUNC);
-#endif /* QAK */
-
/*
* Check arguments.
*/
@@ -1009,9 +917,6 @@ H5EA_delete(H5F_t *f, hid_t dxpl_id, haddr_t ea_addr, void *ctx_udata))
HDassert(H5F_addr_defined(ea_addr));
/* Lock the array header into memory */
-#ifdef QAK
-HDfprintf(stderr, "%s: ea_addr = %a\n", FUNC, ea_addr);
-#endif /* QAK */
if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, ea_addr, ctx_udata, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array header, address = %llu", (unsigned long long)ea_addr)
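[The H5EA.c hunks above thread a new will_extend flag from H5EA_set()/H5EA_get() into H5EA__lookup_elmt(), and use the new has_hdr_depend field so a flush dependency on the array header is created at most once per data block or data block page. A minimal sketch of the combined flow, with error handling abbreviated and "entry" standing in for the real dblock/dblk_page structures:]

/* Caller side (H5EA_set): decide up front whether this write extends the array */
will_extend = (idx >= hdr->stats.stored.max_idx_set);
if(H5EA__lookup_elmt(ea, dxpl_id, idx, will_extend, H5AC__NO_FLAGS_SET,
        &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0)
    H5E_THROW(H5E_CANTPROTECT, "unable to protect array metadata")

/* Lookup side: create the header dependency only once per entry */
if(will_extend && !entry->has_hdr_depend) {
    if(H5EA__create_flush_depend((H5AC_info_t *)hdr, (H5AC_info_t *)entry) < 0)
        H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency on header")
    entry->has_hdr_depend = TRUE;
} /* end if */

/* Back in H5EA_set(): only bump the stored maximum when we actually extended */
if(will_extend) {
    hdr->stats.stored.max_idx_set = idx + 1;
    if(H5EA__hdr_modified(hdr) < 0)
        H5E_THROW(H5E_CANTMARKDIRTY, "unable to mark extensible array header as modified")
} /* end if */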
diff --git a/src/H5EAcache.c b/src/H5EAcache.c
index e9c93d4..9224916 100644
--- a/src/H5EAcache.c
+++ b/src/H5EAcache.c
@@ -1179,13 +1179,25 @@ H5EA__cache_sblock_notify(H5AC_notify_action_t action, void *_thing))
break;
case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
- /* do nothing */
+ /* Destroy flush dependency on extensible array header, if set */
+ if(sblock->has_hdr_depend) {
+ if(H5EA__destroy_flush_depend((H5AC_info_t *)sblock->hdr, (H5AC_info_t *)sblock) < 0)
+ H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between super block and header, address = %llu", (unsigned long long)sblock->addr)
+ sblock->has_hdr_depend = FALSE;
+ } /* end if */
break;
case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
/* Destroy flush dependency on index block */
if(H5EA__destroy_flush_depend((H5AC_info_t *)sblock->parent, (H5AC_info_t *)sblock) < 0)
H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between super block and index block, address = %llu", (unsigned long long)sblock->addr)
+
+ /* Destroy flush dependency on extensible array header, if set */
+ if(sblock->has_hdr_depend) {
+ if(H5EA__destroy_flush_depend((H5AC_info_t *)sblock->hdr, (H5AC_info_t *)sblock) < 0)
+ H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between super block and header, address = %llu", (unsigned long long)sblock->addr)
+ sblock->has_hdr_depend = FALSE;
+ } /* end if */
break;
default:
@@ -1534,13 +1546,25 @@ H5EA__cache_dblock_notify(H5AC_notify_action_t action, void *_thing))
break;
case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
- /* do nothing */
+ /* Destroy flush dependency on extensible array header, if set */
+ if(dblock->has_hdr_depend) {
+ if(H5EA__destroy_flush_depend((H5AC_info_t *)dblock->hdr, (H5AC_info_t *)dblock) < 0)
+ H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between direct block and header, address = %llu", (unsigned long long)dblock->addr)
+ dblock->has_hdr_depend = FALSE;
+ } /* end if */
break;
case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
/* Destroy flush dependency on parent */
if(H5EA__destroy_flush_depend((H5AC_info_t *)dblock->parent, (H5AC_info_t *)dblock) < 0)
H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between data block and parent, address = %llu", (unsigned long long)dblock->addr)
+
+ /* Destroy flush dependency on extensible array header, if set */
+ if(dblock->has_hdr_depend) {
+ if(H5EA__destroy_flush_depend((H5AC_info_t *)dblock->hdr, (H5AC_info_t *)dblock) < 0)
+ H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between direct block and header, address = %llu", (unsigned long long)dblock->addr)
+ dblock->has_hdr_depend = FALSE;
+ } /* end if */
break;
default:
@@ -1856,13 +1880,25 @@ H5EA__cache_dblk_page_notify(H5AC_notify_action_t action, void *_thing))
break;
case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
- /* do nothing */
+ /* Destroy flush dependency on extensible array header, if set */
+ if(dblk_page->has_hdr_depend) {
+ if(H5EA__destroy_flush_depend((H5AC_info_t *)dblk_page->hdr, (H5AC_info_t *)dblk_page) < 0)
+ H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between data block page and header, address = %llu", (unsigned long long)dblk_page->addr)
+ dblk_page->has_hdr_depend = FALSE;
+ } /* end if */
break;
case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
/* Destroy flush dependency on parent */
if(H5EA__destroy_flush_depend((H5AC_info_t *)dblk_page->parent, (H5AC_info_t *)dblk_page) < 0)
H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between data block page and parent, address = %llu", (unsigned long long)dblk_page->addr)
+
+ /* Destroy flush dependency on extensible array header, if set */
+ if(dblk_page->has_hdr_depend) {
+ if(H5EA__destroy_flush_depend((H5AC_info_t *)dblk_page->hdr, (H5AC_info_t *)dblk_page) < 0)
+ H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between data block page and header, address = %llu", (unsigned long long)dblk_page->addr)
+ dblk_page->has_hdr_depend = FALSE;
+ } /* end if */
break;
default:
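[All three notify callbacks above (super block, data block, data block page) gain the same teardown: the temporary flush dependency on the header is destroyed both after a flush and before eviction, and has_hdr_depend is cleared so the dependency is never destroyed twice. A condensed sketch of the shared pattern, where "entry" is a placeholder for the three real structures and the existing parent-dependency handling in the BEFORE_EVICT case is omitted:]

switch(action) {
    case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
    case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
        /* Destroy flush dependency on extensible array header, if set */
        if(entry->has_hdr_depend) {
            if(H5EA__destroy_flush_depend((H5AC_info_t *)entry->hdr, (H5AC_info_t *)entry) < 0)
                H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency on header")
            entry->has_hdr_depend = FALSE;
        } /* end if */
        break;

    default:
        break;
} /* end switch */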
diff --git a/src/H5EAdblkpage.c b/src/H5EAdblkpage.c
index 9f3b39a..327feb0 100644
--- a/src/H5EAdblkpage.c
+++ b/src/H5EAdblkpage.c
@@ -156,10 +156,6 @@ H5EA__dblk_page_create(H5EA_hdr_t *hdr, hid_t dxpl_id, H5EA_sblock_t *parent,
/* Local variables */
H5EA_dblk_page_t *dblk_page = NULL; /* Extensible array data block page */
-#ifdef QAK
-HDfprintf(stderr, "%s: Called, addr = %a\n", FUNC, addr);
-#endif /* QAK */
-
/* Sanity check */
HDassert(hdr);
@@ -170,9 +166,6 @@ HDfprintf(stderr, "%s: Called, addr = %a\n", FUNC, addr);
/* Set info about data block page on disk */
dblk_page->addr = addr;
dblk_page->size = H5EA_DBLK_PAGE_SIZE(hdr);
-#ifdef QAK
-HDfprintf(stderr, "%s: dblk_page->size = %Zu\n", FUNC, dblk_page->size);
-#endif /* QAK */
/* Clear any elements in data block page to fill value */
if((hdr->cparam.cls->fill)(dblk_page->elmts, (size_t)hdr->dblk_page_nelmts) < 0)
@@ -215,10 +208,6 @@ H5EA__dblk_page_protect(H5EA_hdr_t *hdr, hid_t dxpl_id, H5EA_sblock_t *parent,
/* Local variables */
H5EA_dblk_page_cache_ud_t udata; /* Information needed for loading data block page */
-#ifdef QAK
-HDfprintf(stderr, "%s: Called\n", FUNC);
-#endif /* QAK */
-
/* Sanity check */
HDassert(hdr);
HDassert(H5F_addr_defined(dblk_page_addr));
@@ -261,10 +250,6 @@ H5EA__dblk_page_unprotect(H5EA_dblk_page_t *dblk_page, hid_t dxpl_id,
/* Local variables */
-#ifdef QAK
-HDfprintf(stderr, "%s: Called\n", FUNC);
-#endif /* QAK */
-
/* Sanity check */
HDassert(dblk_page);
@@ -296,6 +281,7 @@ H5EA__dblk_page_dest(H5EA_dblk_page_t *dblk_page))
/* Sanity check */
HDassert(dblk_page);
+ HDassert(!dblk_page->has_hdr_depend);
/* Check if header field has been initialized */
if(dblk_page->hdr) {
diff --git a/src/H5EAdblock.c b/src/H5EAdblock.c
index 3eaaf1c..9511a9d 100644
--- a/src/H5EAdblock.c
+++ b/src/H5EAdblock.c
@@ -169,10 +169,6 @@ H5EA__dblock_create(H5EA_hdr_t *hdr, hid_t dxpl_id, void *parent,
H5EA_dblock_t *dblock = NULL; /* Extensible array data block */
haddr_t dblock_addr; /* Extensible array data block address */
-#ifdef QAK
-HDfprintf(stderr, "%s: Called, hdr->dblk_page_nelmts = %Zu, nelmts = %Zu\n", FUNC, hdr->dblk_page_nelmts, nelmts);
-#endif /* QAK */
-
/* Sanity check */
HDassert(hdr);
HDassert(stats_changed);
@@ -184,15 +180,9 @@ HDfprintf(stderr, "%s: Called, hdr->dblk_page_nelmts = %Zu, nelmts = %Zu\n", FUN
/* Set size of data block on disk */
dblock->size = H5EA_DBLOCK_SIZE(dblock);
-#ifdef QAK
-HDfprintf(stderr, "%s: dblock->size = %Zu\n", FUNC, dblock->size);
-#endif /* QAK */
/* Set offset of block in array's address space */
dblock->block_off = dblk_off;
-#ifdef QAK
-HDfprintf(stderr, "%s: dblock->block_off = %Hu\n", FUNC, dblock->block_off);
-#endif /* QAK */
/* Allocate space for the data block on disk */
if(HADDR_UNDEF == (dblock_addr = H5MF_alloc(hdr->f, H5FD_MEM_EARRAY_DBLOCK, dxpl_id, (hsize_t)dblock->size)))
@@ -262,25 +252,12 @@ H5EA__dblock_sblk_idx(const H5EA_hdr_t *hdr, hsize_t idx))
HDassert(hdr);
HDassert(idx >= hdr->cparam.idx_blk_elmts);
-#ifdef QAK
-HDfprintf(stderr, "%s: Entering - idx = %Hu\n", FUNC, idx);
-#endif /* QAK */
/* Adjust index for elements in index block */
idx -= hdr->cparam.idx_blk_elmts;
-#ifdef QAK
-HDfprintf(stderr, "%s: after adjusting for index block elements, idx = %Hu\n", FUNC, idx);
-#endif /* QAK */
/* Determine the superblock information for the index */
H5_CHECK_OVERFLOW(idx, /*From:*/hsize_t, /*To:*/uint64_t);
-#ifdef QAK
-HDfprintf(stderr, "%s: hdr->cparam.data_blk_min_elmts = %u\n", FUNC, (unsigned)hdr->cparam.data_blk_min_elmts);
-#endif /* QAK */
sblk_idx = H5VM_log2_gen((uint64_t)((idx / hdr->cparam.data_blk_min_elmts) + 1));
-#ifdef QAK
-HDfprintf(stderr, "%s: sblk_idx = %u\n", FUNC, sblk_idx);
-HDfprintf(stderr, "%s: hdr->sblk_info[%u] = {%Hu, %Zu, %Hu, %Hu}\n", FUNC, sblk_idx, hdr->sblk_info[sblk_idx].ndblks, hdr->sblk_info[sblk_idx].dblk_nelmts, hdr->sblk_info[sblk_idx].start_idx, hdr->sblk_info[sblk_idx].start_dblk);
-#endif /* QAK */
/* Set return value */
ret_value = sblk_idx;
@@ -309,10 +286,6 @@ H5EA__dblock_protect(H5EA_hdr_t *hdr, hid_t dxpl_id, void *parent,
/* Local variables */
H5EA_dblock_cache_ud_t udata; /* Information needed for loading data block */
-#ifdef QAK
-HDfprintf(stderr, "%s: Called\n", FUNC);
-#endif /* QAK */
-
/* Sanity check */
HDassert(hdr);
HDassert(H5F_addr_defined(dblk_addr));
@@ -355,10 +328,6 @@ H5EA__dblock_unprotect(H5EA_dblock_t *dblock, hid_t dxpl_id, unsigned cache_flag
/* Local variables */
-#ifdef QAK
-HDfprintf(stderr, "%s: Called\n", FUNC);
-#endif /* QAK */
-
/* Sanity check */
HDassert(dblock);
@@ -392,10 +361,6 @@ H5EA__dblock_delete(H5EA_hdr_t *hdr, hid_t dxpl_id, void *parent,
/* Local variables */
H5EA_dblock_t *dblock = NULL; /* Pointer to data block */
-#ifdef QAK
-HDfprintf(stderr, "%s: Called\n", FUNC);
-#endif /* QAK */
-
/* Sanity check */
HDassert(hdr);
HDassert(parent);
@@ -420,16 +385,10 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
/* Iterate over pages in data block */
for(u = 0; u < npages; u++) {
-#ifdef QAK
-HDfprintf(stderr, "%s: Expunging data block page from cache\n", FUNC);
-#endif /* QAK */
/* Evict the data block page from the metadata cache */
/* (OK to call if it doesn't exist in the cache) */
if(H5AC_expunge_entry(hdr->f, dxpl_id, H5AC_EARRAY_DBLK_PAGE, dblk_page_addr, H5AC__NO_FLAGS_SET) < 0)
H5E_THROW(H5E_CANTEXPUNGE, "unable to remove array data block page from metadata cache")
-#ifdef QAK
-HDfprintf(stderr, "%s: Done expunging data block page from cache\n", FUNC);
-#endif /* QAK */
/* Advance to next page address */
dblk_page_addr += dblk_page_size;
@@ -463,6 +422,7 @@ H5EA__dblock_dest(H5EA_dblock_t *dblock))
/* Sanity check */
HDassert(dblock);
+ HDassert(!dblock->has_hdr_depend);
/* Check if shared header field has been initialized */
if(dblock->hdr) {
diff --git a/src/H5EApkg.h b/src/H5EApkg.h
index 2e13694..6f3ee9c 100644
--- a/src/H5EApkg.h
+++ b/src/H5EApkg.h
@@ -241,6 +241,7 @@ typedef struct H5EA_sblock_t {
/* Internal array information (not stored) */
H5EA_hdr_t *hdr; /* Shared array header info */
+ hbool_t has_hdr_depend; /* Whether this object has a flush dependency on the header */
H5EA_iblock_t *parent; /* Parent object for super block (index block) */
haddr_t addr; /* Address of this index block on disk */
size_t size; /* Size of index block on disk */
@@ -265,6 +266,7 @@ typedef struct H5EA_dblock_t {
/* Internal array information (not stored) */
H5EA_hdr_t *hdr; /* Shared array header info */
+ hbool_t has_hdr_depend; /* Whether this object has a flush dependency on the header */
void *parent; /* Parent object for data block (index or super block) */
haddr_t addr; /* Address of this data block on disk */
size_t size; /* Size of data block on disk */
@@ -284,6 +286,7 @@ typedef struct H5EA_dbk_page_t {
/* Internal array information (not stored) */
H5EA_hdr_t *hdr; /* Shared array header info */
+ hbool_t has_hdr_depend; /* Whether this object has a flush dependency on the header */
H5EA_sblock_t *parent; /* Parent object for data block page (super block) */
haddr_t addr; /* Address of this data block page on disk */
size_t size; /* Size of data block page on disk */
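[The H5EApkg.h changes add the same one-line field to all three in-core structures; together with the new HDassert() checks in the *_dest() routines further down, the flag enforces a simple lifecycle: the header dependency is created lazily during a lookup that extends the array, and the notify callback must have removed it by the time the in-core entry is freed. A compressed illustration, with entry_t standing in for H5EA_sblock_t, H5EA_dblock_t and H5EA_dblk_page_t:]

typedef struct entry_t {
    /* Internal array information (not stored) */
    H5EA_hdr_t *hdr;            /* Shared array header info */
    hbool_t has_hdr_depend;     /* Whether this object has a flush dependency on the header */
    /* ... remaining fields unchanged ... */
} entry_t;

/* In the matching *_dest() routine: */
HDassert(!entry->has_hdr_depend);   /* notify callback must already have cleared it */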
diff --git a/src/H5EAsblock.c b/src/H5EAsblock.c
index f775484..4f153cd 100644
--- a/src/H5EAsblock.c
+++ b/src/H5EAsblock.c
@@ -133,9 +133,6 @@ H5EA__sblock_alloc(H5EA_hdr_t *hdr, H5EA_iblock_t *parent, unsigned sblk_idx))
sblock->ndblks = hdr->sblk_info[sblk_idx].ndblks;
HDassert(sblock->ndblks);
sblock->dblk_nelmts = hdr->sblk_info[sblk_idx].dblk_nelmts;
-#ifdef QAK
-HDfprintf(stderr, "%s: hdr->dblk_page_nelmts = %Zu, sblock->ndblks = %Zu, sblock->dblk_nelmts = %Zu\n", FUNC, hdr->dblk_page_nelmts, sblock->ndblks, sblock->dblk_nelmts);
-#endif /* QAK */
/* Allocate buffer for data block addresses in super block */
if(NULL == (sblock->dblk_addrs = H5FL_SEQ_MALLOC(haddr_t, sblock->ndblks)))
@@ -199,10 +196,6 @@ H5EA__sblock_create(H5EA_hdr_t *hdr, hid_t dxpl_id, H5EA_iblock_t *parent,
haddr_t sblock_addr; /* Extensible array super block address */
haddr_t tmp_addr = HADDR_UNDEF; /* Address value to fill data block addresses with */
-#ifdef QAK
-HDfprintf(stderr, "%s: Called\n", FUNC);
-#endif /* QAK */
-
/* Sanity check */
HDassert(hdr);
HDassert(stats_changed);
@@ -213,15 +206,9 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
/* Set size of super block on disk */
sblock->size = H5EA_SBLOCK_SIZE(sblock);
-#ifdef QAK
-HDfprintf(stderr, "%s: sblock->size = %Zu\n", FUNC, sblock->size);
-#endif /* QAK */
/* Set offset of block in array's address space */
sblock->block_off = hdr->sblk_info[sblk_idx].start_idx;
-#ifdef QAK
-HDfprintf(stderr, "%s: sblock->block_off = %Hu\n", FUNC, sblock->block_off);
-#endif /* QAK */
/* Allocate space for the super block on disk */
if(HADDR_UNDEF == (sblock_addr = H5MF_alloc(hdr->f, H5FD_MEM_EARRAY_SBLOCK, dxpl_id, (hsize_t)sblock->size)))
@@ -281,11 +268,6 @@ H5EA__sblock_protect(H5EA_hdr_t *hdr, hid_t dxpl_id, H5EA_iblock_t *parent,
/* Local variables */
H5EA_sblock_cache_ud_t udata; /* Information needed for loading super block */
-
-#ifdef QAK
-HDfprintf(stderr, "%s: Called\n", FUNC);
-#endif /* QAK */
-
/* Sanity check */
HDassert(hdr);
HDassert(H5F_addr_defined(sblk_addr));
@@ -327,10 +309,6 @@ H5EA__sblock_unprotect(H5EA_sblock_t *sblock, hid_t dxpl_id, unsigned cache_flag
/* Local variables */
-#ifdef QAK
-HDfprintf(stderr, "%s: Called\n", FUNC);
-#endif /* QAK */
-
/* Sanity check */
HDassert(sblock);
@@ -365,10 +343,6 @@ H5EA__sblock_delete(H5EA_hdr_t *hdr, hid_t dxpl_id, H5EA_iblock_t *parent,
H5EA_sblock_t *sblock = NULL; /* Pointer to super block */
size_t u; /* Local index variable */
-#ifdef QAK
-HDfprintf(stderr, "%s: Called\n", FUNC);
-#endif /* QAK */
-
/* Sanity check */
HDassert(hdr);
HDassert(H5F_addr_defined(sblk_addr));
@@ -415,9 +389,7 @@ H5EA__sblock_dest(H5EA_sblock_t *sblock))
/* Sanity check */
HDassert(sblock);
-#ifdef QAK
-HDfprintf(stderr, "%s: sblock->hdr->dblk_page_nelmts = %Zu, sblock->ndblks = %Zu, sblock->dblk_nelmts = %Zu\n", FUNC, sblock->hdr->dblk_page_nelmts, sblock->ndblks, sblock->dblk_nelmts);
-#endif /* QAK */
+ HDassert(!sblock->has_hdr_depend);
/* Check if shared header field has been initialized */
if(sblock->hdr) {
diff --git a/src/H5FD.c b/src/H5FD.c
index 2a15fe8..57905b1 100644
--- a/src/H5FD.c
+++ b/src/H5FD.c
@@ -1608,12 +1608,12 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5FDflush(H5FD_t *file, hid_t dxpl_id, unsigned closing)
+H5FDflush(H5FD_t *file, hid_t dxpl_id, hbool_t closing)
{
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_API(FAIL)
- H5TRACE3("e", "*xiIu", file, dxpl_id, closing);
+ H5TRACE3("e", "*xib", file, dxpl_id, closing);
/* Check args */
if(!file || !file->cls)
@@ -1647,7 +1647,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5FD_flush(H5FD_t *file, hid_t dxpl_id, unsigned closing)
+H5FD_flush(H5FD_t *file, hid_t dxpl_id, hbool_t closing)
{
herr_t ret_value = SUCCEED; /* Return value */
diff --git a/src/H5FDcore.c b/src/H5FDcore.c
index 865a29e..5ce6560 100644
--- a/src/H5FDcore.c
+++ b/src/H5FDcore.c
@@ -139,7 +139,7 @@ static herr_t H5FD__core_read(H5FD_t *_file, H5FD_mem_t type, hid_t fapl_id, had
size_t size, void *buf);
static herr_t H5FD__core_write(H5FD_t *_file, H5FD_mem_t type, hid_t fapl_id, haddr_t addr,
size_t size, const void *buf);
-static herr_t H5FD__core_flush(H5FD_t *_file, hid_t dxpl_id, unsigned closing);
+static herr_t H5FD__core_flush(H5FD_t *_file, hid_t dxpl_id, hbool_t closing);
static herr_t H5FD__core_truncate(H5FD_t *_file, hid_t dxpl_id, hbool_t closing);
static herr_t H5FD_core_lock(H5FD_t *_file, hbool_t rw);
static herr_t H5FD_core_unlock(H5FD_t *_file);
@@ -1335,7 +1335,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5FD__core_flush(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, unsigned H5_ATTR_UNUSED closing)
+H5FD__core_flush(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, hbool_t H5_ATTR_UNUSED closing)
{
H5FD_core_t *file = (H5FD_core_t*)_file;
herr_t ret_value = SUCCEED; /* Return value */
diff --git a/src/H5FDfamily.c b/src/H5FDfamily.c
index b92a685..3b38836 100644
--- a/src/H5FDfamily.c
+++ b/src/H5FDfamily.c
@@ -104,8 +104,8 @@ static herr_t H5FD_family_read(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, ha
size_t size, void *_buf/*out*/);
static herr_t H5FD_family_write(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr,
size_t size, const void *_buf);
-static herr_t H5FD_family_flush(H5FD_t *_file, hid_t dxpl_id, unsigned closing);
-static herr_t H5FD_family_truncate(H5FD_t *_file, hid_t dxpl_id, unsigned closing);
+static herr_t H5FD_family_flush(H5FD_t *_file, hid_t dxpl_id, hbool_t closing);
+static herr_t H5FD_family_truncate(H5FD_t *_file, hid_t dxpl_id, hbool_t closing);
static herr_t H5FD_family_lock(H5FD_t *_file, hbool_t rw);
static herr_t H5FD_family_unlock(H5FD_t *_file);
@@ -1249,7 +1249,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5FD_family_flush(H5FD_t *_file, hid_t dxpl_id, unsigned closing)
+H5FD_family_flush(H5FD_t *_file, hid_t dxpl_id, hbool_t closing)
{
H5FD_family_t *file = (H5FD_family_t*)_file;
unsigned u, nerrors = 0;
@@ -1284,7 +1284,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5FD_family_truncate(H5FD_t *_file, hid_t dxpl_id, unsigned closing)
+H5FD_family_truncate(H5FD_t *_file, hid_t dxpl_id, hbool_t closing)
{
H5FD_family_t *file = (H5FD_family_t*)_file;
unsigned u, nerrors = 0;
diff --git a/src/H5FDmulti.c b/src/H5FDmulti.c
index bf27b1a..0e49813 100644
--- a/src/H5FDmulti.c
+++ b/src/H5FDmulti.c
@@ -133,7 +133,7 @@ static herr_t H5FD_multi_read(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, had
size_t size, void *_buf/*out*/);
static herr_t H5FD_multi_write(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr,
size_t size, const void *_buf);
-static herr_t H5FD_multi_flush(H5FD_t *_file, hid_t dxpl_id, unsigned closing);
+static herr_t H5FD_multi_flush(H5FD_t *_file, hid_t dxpl_id, hbool_t closing);
static herr_t H5FD_multi_truncate(H5FD_t *_file, hid_t dxpl_id, hbool_t closing);
static herr_t H5FD_multi_lock(H5FD_t *_file, hbool_t rw);
static herr_t H5FD_multi_unlock(H5FD_t *_file);
@@ -1686,7 +1686,7 @@ H5FD_multi_write(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr,
*-------------------------------------------------------------------------
*/
static herr_t
-H5FD_multi_flush(H5FD_t *_file, hid_t dxpl_id, unsigned closing)
+H5FD_multi_flush(H5FD_t *_file, hid_t dxpl_id, hbool_t closing)
{
H5FD_multi_t *file = (H5FD_multi_t*)_file;
H5FD_mem_t mt;
diff --git a/src/H5FDprivate.h b/src/H5FDprivate.h
index 639f3eb..915f7e1 100644
--- a/src/H5FDprivate.h
+++ b/src/H5FDprivate.h
@@ -158,7 +158,7 @@ H5_DLL herr_t H5FD_read(H5FD_t *file, const H5P_genplist_t *dxpl, H5FD_mem_t typ
haddr_t addr, size_t size, void *buf/*out*/);
H5_DLL herr_t H5FD_write(H5FD_t *file, const H5P_genplist_t *dxpl, H5FD_mem_t type,
haddr_t addr, size_t size, const void *buf);
-H5_DLL herr_t H5FD_flush(H5FD_t *file, hid_t dxpl_id, unsigned closing);
+H5_DLL herr_t H5FD_flush(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
H5_DLL herr_t H5FD_truncate(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
H5_DLL herr_t H5FD_lock(H5FD_t *file, hbool_t rw);
H5_DLL herr_t H5FD_unlock(H5FD_t *file);
diff --git a/src/H5FDpublic.h b/src/H5FDpublic.h
index 4183d14..4931e0f 100644
--- a/src/H5FDpublic.h
+++ b/src/H5FDpublic.h
@@ -272,7 +272,7 @@ typedef struct H5FD_class_t {
haddr_t addr, size_t size, void *buffer);
herr_t (*write)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl,
haddr_t addr, size_t size, const void *buffer);
- herr_t (*flush)(H5FD_t *file, hid_t dxpl_id, unsigned closing);
+ herr_t (*flush)(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
herr_t (*truncate)(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
herr_t (*lock)(H5FD_t *file, hbool_t rw);
herr_t (*unlock)(H5FD_t *file);
@@ -353,7 +353,7 @@ H5_DLL herr_t H5FDread(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id,
haddr_t addr, size_t size, void *buf/*out*/);
H5_DLL herr_t H5FDwrite(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id,
haddr_t addr, size_t size, const void *buf);
-H5_DLL herr_t H5FDflush(H5FD_t *file, hid_t dxpl_id, unsigned closing);
+H5_DLL herr_t H5FDflush(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
H5_DLL herr_t H5FDtruncate(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
H5_DLL herr_t H5FDlock(H5FD_t *file, hbool_t rw);
H5_DLL herr_t H5FDunlock(H5FD_t *file);
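[The flush member of H5FD_class_t, the H5FDflush()/H5FD_flush() wrappers and every bundled driver now take hbool_t closing instead of unsigned closing, matching the truncate callback. A minimal sketch of a driver-side callback against the revised prototype; my_vfd_t and my_vfd_flush are hypothetical names, not part of this change:]

typedef struct my_vfd_t {
    H5FD_t pub;     /* public driver fields, must come first */
    int fd;         /* hypothetical backing file descriptor */
} my_vfd_t;

static herr_t
my_vfd_flush(H5FD_t *_file, hid_t dxpl_id, hbool_t closing)
{
    my_vfd_t *file = (my_vfd_t *)_file;

    (void)dxpl_id;  /* unused in this sketch */
    (void)file;

    /* Push buffered data to the backing store.  Like the stdio driver, a
     * driver may skip the work when 'closing' is TRUE, since the close
     * that follows will flush anyway. */
    if(!closing) {
        /* ... driver-specific sync of 'file' goes here ... */
    }

    return 0;   /* SUCCEED */
} /* end my_vfd_flush() */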
diff --git a/src/H5FDstdio.c b/src/H5FDstdio.c
index bc85c74..0168ff4 100644
--- a/src/H5FDstdio.c
+++ b/src/H5FDstdio.c
@@ -173,7 +173,7 @@ static herr_t H5FD_stdio_read(H5FD_t *lf, H5FD_mem_t type, hid_t fapl_id, haddr_
size_t size, void *buf);
static herr_t H5FD_stdio_write(H5FD_t *lf, H5FD_mem_t type, hid_t fapl_id, haddr_t addr,
size_t size, const void *buf);
-static herr_t H5FD_stdio_flush(H5FD_t *_file, hid_t dxpl_id, unsigned closing);
+static herr_t H5FD_stdio_flush(H5FD_t *_file, hid_t dxpl_id, hbool_t closing);
static herr_t H5FD_stdio_truncate(H5FD_t *_file, hid_t dxpl_id, hbool_t closing);
static herr_t H5FD_stdio_lock(H5FD_t *_file, hbool_t rw);
static herr_t H5FD_stdio_unlock(H5FD_t *_file);
@@ -955,7 +955,7 @@ H5FD_stdio_write(H5FD_t *_file, H5FD_mem_t /*UNUSED*/ type, hid_t /*UNUSED*/ dxp
*-------------------------------------------------------------------------
*/
static herr_t
-H5FD_stdio_flush(H5FD_t *_file, hid_t /*UNUSED*/ dxpl_id, unsigned closing)
+H5FD_stdio_flush(H5FD_t *_file, hid_t /*UNUSED*/ dxpl_id, hbool_t closing)
{
H5FD_stdio_t *file = (H5FD_stdio_t*)_file;
static const char *func = "H5FD_stdio_flush"; /* Function Name for error reporting */
diff --git a/src/H5HFcache.c b/src/H5HFcache.c
index 38556b7..759fff0 100644
--- a/src/H5HFcache.c
+++ b/src/H5HFcache.c
@@ -118,13 +118,13 @@ static herr_t H5HF__cache_dblock_free_icr(void *thing);
/* Debugging Function Prototypes */
#ifndef NDEBUG
-static herr_t H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, hid_t dxpl_id,
- H5HF_hdr_t *hdr, hbool_t *clean);
-static herr_t H5HF__cache_verify_iblock_descendants_clean(H5F_t *f, hid_t dxpl_id,
+static herr_t H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
+ hbool_t *clean);
+static herr_t H5HF__cache_verify_iblock_descendants_clean(H5F_t *f,
H5HF_indirect_t *iblock, unsigned *iblock_status, hbool_t *clean);
static herr_t H5HF__cache_verify_iblocks_dblocks_clean(H5F_t *f,
H5HF_indirect_t *iblock, hbool_t *clean, hbool_t *has_dblocks);
-static herr_t H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, hid_t dxpl_id,
+static herr_t H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f,
H5HF_indirect_t *iblock, hbool_t *clean, hbool_t *has_iblocks);
#endif /* NDEBUG */
@@ -657,7 +657,7 @@ H5HF__cache_hdr_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *_thing,
*
* Do this with a call to H5HF__cache_verify_hdr_descendants_clean().
*/
- if(H5HF__cache_verify_hdr_descendants_clean((H5F_t *)f, dxpl_id, hdr, &descendants_clean) < 0)
+ if(H5HF__cache_verify_hdr_descendants_clean((H5F_t *)f, hdr, &descendants_clean) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify hdr descendants clean.")
HDassert(descendants_clean);
}
@@ -1165,7 +1165,7 @@ H5HF__cache_iblock_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *_thing,
* there is no need to check to see if it is pinned or protected, or to
* protect it if it is not.
*/
- if(H5HF__cache_verify_iblock_descendants_clean((H5F_t *)f, dxpl_id, iblock, &iblock_status, &descendants_clean) < 0)
+ if(H5HF__cache_verify_iblock_descendants_clean((H5F_t *)f, iblock, &iblock_status, &descendants_clean) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify descendants clean.")
HDassert(descendants_clean);
}
@@ -2479,8 +2479,8 @@ done:
*/
#ifndef NDEBUG
static herr_t
-H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, hid_t dxpl_id,
- H5HF_hdr_t * hdr, hbool_t *clean)
+H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
+ hbool_t *clean)
{
haddr_t hdr_addr; /* Address of header */
unsigned hdr_status = 0; /* Header cache entry status */
@@ -2561,122 +2561,11 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, hid_t dxpl_id,
*clean = TRUE;
else if(root_iblock_status & H5AC_ES__IS_DIRTY)
*clean = FALSE;
- else { /* must examine children */
- hbool_t unprotect_root_iblock = FALSE;
-
- /* At this point, the root iblock may be pinned, protected,
- * both, or neither, and we may or may not have a pointer
- * to root iblock in memory.
- *
- * Before we call H5HF__cache_verify_iblock_descendants_clean(),
- * we must ensure that the root iblock is either pinned or
- * protected or both, and that we have a pointer to it.
- * Do this as follows:
- */
- if(root_iblock == NULL) { /* we don't have ptr to root iblock */
- if(0 == (root_iblock_status & H5AC_ES__IS_PROTECTED)) {
- /* just protect the root iblock -- this will give us
- * the pointer we need to proceed, and ensure that
- * it is locked into the metadata cache for the
- * duration.
- *
- * Note that the udata is only used in the load callback.
- * While the fractal heap makes heavy use of the udata
- * in this case, since we know that the entry is in cache,
- * we can pass NULL udata.
- */
- if(NULL == (root_iblock = (H5HF_indirect_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_IBLOCK, root_iblock_addr, NULL, H5C__READ_ONLY_FLAG)))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() faild.")
- unprotect_root_iblock = TRUE;
- } /* end if */
- else {
- /* the root iblock is protected, and we have no
- * legitimate way of getting a pointer to it.
- *
- * We square this circle by using the
- * H5AC_get_entry_ptr_from_addr() to get the needed
- * pointer.
- *
- * WARNING: This call should be used only in debugging
- * routines, and it should be avoided there when
- * possible.
- *
- * Further, if we ever multi-thread the cache,
- * this routine will have to be either discarded
- * or heavily re-worked.
- *
- * Finally, keep in mind that the entry whose
- * pointer is obtained in this fashion may not
- * be in a stable state.
- *
- * Assuming that the flush dependency code is working
- * as it should, the only reason for the root iblock to
- * be unpinned is if none of its children are in cache.
- * This unfortunately means that if it is protected and
- * not pinned, the fractal heap is in the process of loading
- * or inserting one of its children. The obvious implication
- * is that there is a significant chance that the root
- * iblock is in an unstable state.
- *
- * All this suggests that using H5AC_get_entry_ptr_from_addr()
- * to obtain the pointer to the protected root iblock is
- * questionable here. However, since this is test/debugging
- * code, I expect that we will use this approach until it
- * causes problems, or we think of a better way.
- */
- if(H5AC_get_entry_ptr_from_addr(f, root_iblock_addr, (void **)(&root_iblock)) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "H5AC_get_entry_ptr_from_addr() failed.")
- HDassert(root_iblock);
- } /* end else */
- } /* end if */
- else { /* root_iblock != NULL */
- /* we have the pointer to the root iblock. Protect it
- * if it is neither pinned nor protected -- otherwise we
- * are ready to go.
- */
- H5HF_indirect_t * iblock = NULL;
-
- if(((root_iblock_status & H5AC_ES__IS_PINNED) == 0) &&
- ((root_iblock_status & H5AC_ES__IS_PROTECTED) == 0)) {
- /* the root iblock is neither pinned nor protected -- hence
- * we must protect it before we proceed
- *
- * Note that the udata is only used in the load callback.
- * While the fractal heap makes heavy use of the udata
- * in this case, since we know that the entry is in cache,
- * we can pass NULL udata.
- */
- if(NULL == (iblock = (H5HF_indirect_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_IBLOCK, root_iblock_addr, NULL, H5C__READ_ONLY_FLAG)))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() faild.")
- unprotect_root_iblock = TRUE;
- HDassert(iblock == root_iblock);
- } /* end if */
- } /* end else */
-
- /* at this point, one way or another, the root iblock is locked
- * in memory for the duration of the call. Do some sanity checks,
- * and then call H5HF__cache_verify_iblock_descendants_clean().
- */
- HDassert(hdr->root_iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(hdr->root_iblock->cache_info.type == H5AC_FHEAP_IBLOCK);
-
- if(H5HF__cache_verify_iblock_descendants_clean(f, dxpl_id, root_iblock, &root_iblock_status, clean) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify root iblock & descendants clean.")
-
- /* unprotect the root indirect block if required */
- if(unprotect_root_iblock) {
- HDassert(root_iblock);
- if(H5AC_unprotect(f, dxpl_id, H5AC_FHEAP_IBLOCK, root_iblock_addr, root_iblock, H5AC__NO_FLAGS_SET) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "H5AC_unprotect() faild.")
- } /* end if */
- } /* end else */
} /* end if */
else if((hdr->man_dtable.curr_root_rows == 0) &&
(HADDR_UNDEF != hdr->man_dtable.table_addr)) {
haddr_t root_dblock_addr;
unsigned root_dblock_status = 0;
- hbool_t in_cache;
- hbool_t type_ok;
/* this is scenario 2 -- we have a root dblock */
root_dblock_addr = hdr->man_dtable.table_addr;
@@ -2684,27 +2573,16 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, hid_t dxpl_id,
HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get root dblock status")
if(root_dblock_status & H5AC_ES__IN_CACHE) {
- if(H5AC_verify_entry_type(f, root_dblock_addr, &H5AC_FHEAP_DBLOCK[0], &in_cache, &type_ok) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't check dblock type")
- HDassert(in_cache);
- if(!type_ok)
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "root dblock addr doesn't refer to a dblock?!?")
-
/* If a root dblock is in cache, it must have a flush
- * dependency relationship with the header, and it
- * may not be the parent in any flush dependency
- * relationship.
- *
- * We don't test this fully, but we will verify that
- * the root iblock is a child in some flush dependency
- * relationship.
+ * dependency relationship with the header.
*/
if(0 == (root_dblock_status & H5AC_ES__IS_FLUSH_DEP_CHILD))
HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "root dblock in cache and not a flush dep child.")
if(0 != (root_dblock_status & H5AC_ES__IS_FLUSH_DEP_PARENT))
HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "root dblock in cache and is a flush dep parent.")
- *clean = ! (root_dblock_status & H5AC_ES__IS_DIRTY);
+ if(root_dblock_status & H5AC_ES__IS_DIRTY)
+ *clean = FALSE;
} /* end if */
else /* root dblock not in cache */
*clean = TRUE;
@@ -2757,8 +2635,8 @@ done:
*/
#ifndef NDEBUG
static herr_t
-H5HF__cache_verify_iblock_descendants_clean(H5F_t *f, hid_t dxpl_id,
- H5HF_indirect_t *iblock, unsigned *iblock_status, hbool_t *clean)
+H5HF__cache_verify_iblock_descendants_clean(H5F_t *f, H5HF_indirect_t *iblock,
+ unsigned *iblock_status, hbool_t *clean)
{
hbool_t has_dblocks = FALSE;
hbool_t has_iblocks = FALSE;
@@ -2778,12 +2656,9 @@ H5HF__cache_verify_iblock_descendants_clean(H5F_t *f, hid_t dxpl_id,
if((*clean) && H5HF__cache_verify_iblocks_dblocks_clean(f, iblock, clean, &has_dblocks) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify dblocks clean.")
- if((*clean) && H5HF__cache_verify_descendant_iblocks_clean(f, dxpl_id, iblock, clean, &has_iblocks) < 0)
+ if((*clean) && H5HF__cache_verify_descendant_iblocks_clean(f, iblock, clean, &has_iblocks) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify iblocks clean.")
- if((NULL == iblock_status) && H5AC_get_entry_status(f, iblock->addr, iblock_status) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get iblock status")
-
/* verify that flush dependency setup is plausible */
if(0 == (*iblock_status & H5AC_ES__IS_FLUSH_DEP_CHILD))
HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "iblock is not a flush dep child.")
@@ -2855,41 +2730,25 @@ H5HF__cache_verify_iblocks_dblocks_clean(H5F_t *f, H5HF_indirect_t *iblock,
dblock_addr = iblock->ents[i].addr;
if(H5F_addr_defined(dblock_addr)) {
- hbool_t in_cache;
- hbool_t type_ok;
-
- if(H5AC_verify_entry_type(f, dblock_addr, &H5AC_FHEAP_DBLOCK[0], &in_cache, &type_ok) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't check dblock type")
-
- if(in_cache) { /* dblock is in cache */
- unsigned dblock_status = 0;
+ unsigned dblock_status = 0;
- if(!type_ok)
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "dblock addr doesn't refer to a dblock?!?")
+ if(H5AC_get_entry_status(f, dblock_addr, &dblock_status) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get dblock status")
+ if(dblock_status & H5AC_ES__IN_CACHE) {
+ *has_dblocks = TRUE;
- if(H5AC_get_entry_status(f, dblock_addr, &dblock_status) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get dblock status")
- HDassert(dblock_status & H5AC_ES__IN_CACHE);
-
- *has_dblocks = TRUE;
if(dblock_status & H5AC_ES__IS_DIRTY)
- *clean = FALSE;
+ *clean = FALSE;
/* If a child dblock is in cache, it must have a flush
* dependency relationship with this iblock, and it
* may not be the parent in any flush dependency
* relationship.
- *
- * We don't test this fully, but we will verify that
- * the child iblock is a child in some flush dependency
- * relationship.
*/
if(0 == (dblock_status & H5AC_ES__IS_FLUSH_DEP_CHILD))
HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "dblock in cache and not a flush dep child.")
-
if(0 != (dblock_status & H5AC_ES__IS_FLUSH_DEP_PARENT))
HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "dblock in cache and is a flush dep parent.")
-
} /* end if */
} /* end if */
@@ -2931,8 +2790,8 @@ done:
*/
#ifndef NDEBUG
static herr_t
-H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, hid_t dxpl_id,
- H5HF_indirect_t *iblock, hbool_t *clean, hbool_t *has_iblocks)
+H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, H5HF_indirect_t *iblock,
+ hbool_t *clean, hbool_t *has_iblocks)
{
unsigned first_iblock_index;
unsigned last_iblock_index;
@@ -2970,113 +2829,6 @@ H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, hid_t dxpl_id,
*has_iblocks = TRUE;
if(child_iblock_status & H5AC_ES__IS_DIRTY)
*clean = FALSE;
-
- /* if the child iblock is in cache and *clean is TRUE,
- * we must continue to explore down the fractal heap tree
- * structure to verify that all descendant blocks are either
- * clean, or not in the metadata cache. We do this with a
- * recursive call to
- * H5HF__cache_verify_iblock_descendants_clean().
- * However, we can't make this call unless the child iblock
- * is somehow locked into the cache -- typically via either
- * pinning or protecting.
- *
- * If the child iblock is pinned, we can look up its pointer
- * on the current iblock's pinned child iblock list, and
- * and use that pointer in the recursive call.
- *
- * If the entry is unprotected and unpinned, we simply
- * protect it.
- *
- * If, however, the the child iblock is already protected,
- * but not pinned, we have a bit of a problem, as we have
- * no legitimate way of looking up its pointer in memory.
- *
- * To solve this problem, I have added a new metadata cache
- * call to obtain the pointer.
- *
- * WARNING: This call should be used only in debugging
- * routines, and it should be avoided there when
- * possible.
- *
- * Further, if we ever multi-thread the cache,
- * this routine will have to be either discarded
- * or heavily re-worked.
- *
- * Finally, keep in mind that the entry whose
- * pointer is obtained in this fashion may not
- * be in a stable state.
- *
- * Assuming that the flush dependency code is working
- * as it should, the only reason for the child entry to
- * be unpinned is if none of its children are in cache.
- * This unfortunately means that if it is protected and
- * not pinned, the fractal heap is in the process of loading
- * or inserting one of its children. The obvious implication
- * is that there is a significant chance that the child
- * iblock is in an unstable state.
- *
- * All this suggests that using the new call to obtain the
- * pointer to the protected child iblock is questionable
- * here. However, since this is test/debugging code, I
- * expect that we will use this approach until it causes
- * problems, or we think of a better way.
- */
- if(*clean) {
- H5HF_indirect_t *child_iblock = NULL;
- hbool_t unprotect_child_iblock = FALSE;
-
- if(0 == (child_iblock_status & H5AC_ES__IS_PINNED)) {
- /* child iblock is not pinned */
- if(0 == (child_iblock_status & H5AC_ES__IS_PROTECTED)) {
- /* child iblock is unprotected, and unpinned */
- /* protect it. Note that the udata is only */
- /* used in the load callback. While the */
- /* fractal heap makes heavy use of the udata */
- /* in this case, since we know that the */
- /* entry is in cache, we can pass NULL udata */
-
- if(NULL == (child_iblock = (H5HF_indirect_t *) H5AC_protect(f, dxpl_id, H5AC_FHEAP_IBLOCK, child_iblock_addr, NULL, H5C__READ_ONLY_FLAG)))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() faild.")
- unprotect_child_iblock = TRUE;
- } /* end if */
- else {
- /* child iblock is protected -- use */
- /* H5AC_get_entry_ptr_from_addr() to get a */
- /* pointer to the entry. This is very slimy -- */
- /* come up with a better solution. */
- if(H5AC_get_entry_ptr_from_addr(f, child_iblock_addr, (void **)(&child_iblock)) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "H5AC_get_entry_ptr_from_addr() faild.")
- HDassert(child_iblock);
- } /* end else */
- } /* end if */
- else {
- /* child iblock is pinned -- look it up in the */
- /* parent iblocks child_iblocks array. */
- HDassert(iblock->child_iblocks);
- child_iblock = iblock->child_iblocks[i - first_iblock_index];
- } /* end else */
-
- /* At this point, one way or another we should have
- * a pointer to the child iblock. Verify that we
- * that we have the correct one.
- */
- HDassert(child_iblock);
- HDassert(child_iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(child_iblock->cache_info.type == H5AC_FHEAP_IBLOCK);
- HDassert(child_iblock->addr == child_iblock_addr);
-
- /* now make the recursive call */
- if(H5HF__cache_verify_iblock_descendants_clean(f, dxpl_id, child_iblock, &child_iblock_status, clean) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify child iblock clean.")
-
- /* if we protected the child iblock, unprotect it now */
- if(unprotect_child_iblock) {
- if(H5AC_unprotect(f, dxpl_id, H5AC_FHEAP_IBLOCK, child_iblock_addr, child_iblock, H5AC__NO_FLAGS_SET) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "H5AC_unprotect() faild.")
-
- } /* end if */
- } /* end if */
} /* end if */
} /* end if */
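[The simplified debug checks above no longer protect the root or child blocks in order to recurse into them; they reason purely from the status flags returned by H5AC_get_entry_status(), which avoids the H5AC_get_entry_ptr_from_addr() calls that the removed comments themselves warn against. The reduced per-data-block check pattern, with addr and clean as placeholders and the enclosing function's error-handling context assumed:]

unsigned status = 0;

if(H5AC_get_entry_status(f, addr, &status) < 0)
    HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get entry status")

if(status & H5AC_ES__IN_CACHE) {
    if(status & H5AC_ES__IS_DIRTY)
        *clean = FALSE;

    /* verify that flush dependency setup is plausible */
    if(0 == (status & H5AC_ES__IS_FLUSH_DEP_CHILD))
        HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "entry in cache and not a flush dep child.")
    if(0 != (status & H5AC_ES__IS_FLUSH_DEP_PARENT))
        HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "entry in cache and is a flush dep parent.")
} /* end if */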
diff --git a/src/H5MM.c b/src/H5MM.c
index 5c2a731..daef7b1 100644
--- a/src/H5MM.c
+++ b/src/H5MM.c
@@ -156,7 +156,7 @@ H5MM__is_our_block(void *mem)
*
*-------------------------------------------------------------------------
*/
-static void
+H5_ATTR_PURE static void
H5MM__sanity_check_block(const H5MM_block_t *block)
{
HDassert(block->u.info.size > 0);
@@ -182,7 +182,7 @@ H5MM__sanity_check_block(const H5MM_block_t *block)
*
*-------------------------------------------------------------------------
*/
-static void
+H5_ATTR_PURE static void
H5MM__sanity_check(void *mem)
{
H5MM_block_t *block = H5MM_BLOCK_FROM_BUF(mem);
@@ -203,7 +203,7 @@ H5MM__sanity_check(void *mem)
*
*-------------------------------------------------------------------------
*/
-void
+H5_ATTR_PURE void
H5MM_sanity_check_all(void)
{
H5MM_block_t *curr = NULL;
@@ -228,7 +228,7 @@ H5MM_sanity_check_all(void)
*
*-------------------------------------------------------------------------
*/
-void
+H5_ATTR_PURE void
H5MM_final_sanity_check(void)
{
HDassert(0 == H5MM_curr_alloc_bytes_s);
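[The H5MM sanity-check helpers are now annotated with H5_ATTR_PURE. Assuming the macro expands to a GCC-style pure-function attribute on supporting compilers (and to nothing elsewhere) -- an assumption about the macro, not spelled out in this diff -- the annotation tells the optimizer the function only reads memory and has no side effects. A small illustrative helper in the same spirit; my_block_payload_size() is hypothetical:]

H5_ATTR_PURE static size_t
my_block_payload_size(const H5MM_block_t *block)
{
    /* Reads its argument only and returns a value -- no writes, no I/O --
     * so the compiler may fold or elide repeated calls. */
    return block->u.info.size;
}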
diff --git a/src/H5PL.c b/src/H5PL.c
index a196884..f482fef 100644
--- a/src/H5PL.c
+++ b/src/H5PL.c
@@ -704,7 +704,7 @@ H5PL__search_table(H5PL_type_t plugin_type, int type_id, const void **info)
const H5Z_class2_t *plugin_info;
if(NULL == (get_plugin_info = (H5PL_get_plugin_info_t)H5PL_GET_LIB_FUNC((H5PL_table_g[i]).handle, "H5PLget_plugin_info")))
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTGET, FAIL, "can't get function for H5PLget_plugin_info")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTGET, FAIL, "can't get function for H5PLget_plugin_info")
if(NULL == (plugin_info = (const H5Z_class2_t *)(*get_plugin_info)()))
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTGET, FAIL, "can't get plugin info")
diff --git a/src/H5T.c b/src/H5T.c
index dca1a30..fd73f09 100644
--- a/src/H5T.c
+++ b/src/H5T.c
@@ -3090,8 +3090,10 @@ H5T__create(H5T_class_t type, size_t size)
subtype = H5T_NATIVE_INT_g;
else if(sizeof(long) == size)
subtype = H5T_NATIVE_LONG_g;
+#if H5_SIZEOF_LONG != H5_SIZEOF_LONG_LONG
else if(sizeof(long long) == size)
subtype = H5T_NATIVE_LLONG_g;
+#endif /* H5_SIZEOF_LONG != H5_SIZEOF_LONG_LONG */
else
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "no applicable native integer type")
if(NULL == (dt = H5T__alloc()))
diff --git a/src/H5Znbit.c b/src/H5Znbit.c
index e2fb300..04e8869 100644
--- a/src/H5Znbit.c
+++ b/src/H5Znbit.c
@@ -29,10 +29,10 @@
* one nbit atomic datatype: integer or floating-point
*/
typedef struct {
- size_t size; /* size of datatype */
- int order; /* datatype endianness order */
- int precision; /* datatype precision */
- int offset; /* datatype offset */
+ unsigned size; /* size of datatype */
+ unsigned order; /* datatype endianness order */
+ unsigned precision; /* datatype precision */
+ unsigned offset; /* datatype offset */
} parms_atomic;
/* Local function prototypes */
@@ -41,41 +41,50 @@ static herr_t H5Z_set_local_nbit(hid_t dcpl_id, hid_t type_id, hid_t space_id);
static size_t H5Z_filter_nbit(unsigned flags, size_t cd_nelmts, const unsigned cd_values[],
size_t nbytes, size_t *buf_size, void **buf);
-static void H5Z_calc_parms_nooptype(void);
-static void H5Z_calc_parms_atomic(void);
-static herr_t H5Z_calc_parms_array(const H5T_t *type);
-static herr_t H5Z_calc_parms_compound(const H5T_t *type);
-
-static herr_t H5Z_set_parms_nooptype(const H5T_t *type, unsigned cd_values[]);
-static herr_t H5Z_set_parms_atomic(const H5T_t *type, unsigned cd_values[]);
-static herr_t H5Z_set_parms_array(const H5T_t *type, unsigned cd_values[]);
-static herr_t H5Z_set_parms_compound(const H5T_t *type, unsigned cd_values[]);
-
-static void H5Z_nbit_next_byte(size_t *j, int *buf_len);
-static void H5Z_nbit_decompress_one_byte(unsigned char *data, size_t data_offset, int k, int begin_i,
-int end_i, unsigned char *buffer, size_t *j, int *buf_len, parms_atomic p, int datatype_len);
-static void H5Z_nbit_compress_one_byte(unsigned char *data, size_t data_offset, int k, int begin_i,
-int end_i, unsigned char *buffer, size_t *j, int *buf_len, parms_atomic p, int datatype_len);
+static void H5Z_calc_parms_nooptype(size_t *cd_values_actual_nparms);
+static void H5Z_calc_parms_atomic(size_t *cd_values_actual_nparms);
+static herr_t H5Z_calc_parms_array(const H5T_t *type, size_t *cd_values_actual_nparms);
+static herr_t H5Z_calc_parms_compound(const H5T_t *type, size_t *cd_values_actual_nparms);
+
+static herr_t H5Z_set_parms_nooptype(const H5T_t *type, unsigned *cd_values_index,
+ unsigned cd_values[]);
+static herr_t H5Z_set_parms_atomic(const H5T_t *type, unsigned *cd_values_index,
+ unsigned cd_values[], hbool_t *need_not_compress);
+static herr_t H5Z_set_parms_array(const H5T_t *type, unsigned *cd_values_index,
+ unsigned cd_values[], hbool_t *need_not_compress);
+static herr_t H5Z_set_parms_compound(const H5T_t *type, unsigned *cd_values_index,
+ unsigned cd_values[], hbool_t *need_not_compress);
+
+static void H5Z_nbit_next_byte(size_t *j, size_t *buf_len);
+static void H5Z_nbit_decompress_one_byte(unsigned char *data, size_t data_offset,
+ unsigned k, unsigned begin_i, unsigned end_i, unsigned char *buffer, size_t *j,
+ size_t *buf_len, const parms_atomic *p, size_t datatype_len);
+static void H5Z_nbit_compress_one_byte(unsigned char *data, size_t data_offset, unsigned k, unsigned begin_i,
+ unsigned end_i, unsigned char *buffer, size_t *j, size_t *buf_len, const parms_atomic *p, size_t datatype_len);
static void H5Z_nbit_decompress_one_nooptype(unsigned char *data, size_t data_offset,
- unsigned char *buffer, size_t *j, int *buf_len, unsigned size);
+ unsigned char *buffer, size_t *j, size_t *buf_len, unsigned size);
static void H5Z_nbit_decompress_one_atomic(unsigned char *data, size_t data_offset,
- unsigned char *buffer, size_t *j, int *buf_len, parms_atomic p);
+ unsigned char *buffer, size_t *j, size_t *buf_len, const parms_atomic *p);
static void H5Z_nbit_decompress_one_array(unsigned char *data, size_t data_offset,
- unsigned char *buffer, size_t *j, int *buf_len, const unsigned parms[]);
+ unsigned char *buffer, size_t *j, size_t *buf_len, const unsigned parms[],
+ unsigned *parms_index);
static void H5Z_nbit_decompress_one_compound(unsigned char *data, size_t data_offset,
- unsigned char *buffer, size_t *j, int *buf_len, const unsigned parms[]);
+ unsigned char *buffer, size_t *j, size_t *buf_len, const unsigned parms[],
+ unsigned *parms_index);
static void H5Z_nbit_decompress(unsigned char *data, unsigned d_nelmts, unsigned char *buffer,
- const unsigned parms[]);
+ const unsigned parms[]);
static void H5Z_nbit_compress_one_nooptype(unsigned char *data, size_t data_offset,
- unsigned char *buffer, size_t *j, int *buf_len, unsigned size);
+ unsigned char *buffer, size_t *j, size_t *buf_len, unsigned size);
static void H5Z_nbit_compress_one_atomic(unsigned char *data, size_t data_offset,
- unsigned char *buffer, size_t *j, int *buf_len, parms_atomic p);
+ unsigned char *buffer, size_t *j, size_t *buf_len, const parms_atomic *p);
static void H5Z_nbit_compress_one_array(unsigned char *data, size_t data_offset,
- unsigned char *buffer, size_t *j, int *buf_len, const unsigned parms[]);
+ unsigned char *buffer, size_t *j, size_t *buf_len, const unsigned parms[],
+ unsigned *parms_index);
static void H5Z_nbit_compress_one_compound(unsigned char *data, size_t data_offset,
- unsigned char *buffer, size_t *j, int *buf_len, const unsigned parms[]);
+ unsigned char *buffer, size_t *j, size_t *buf_len, const unsigned parms[],
+ unsigned *parms_index);
static void H5Z_nbit_compress(unsigned char *data, unsigned d_nelmts, unsigned char *buffer,
- size_t *buffer_size, const unsigned parms[]);
+ size_t *buffer_size, const unsigned parms[]);
/* This message derives from H5Z */
H5Z_class2_t H5Z_NBIT[1] = {{
@@ -99,16 +108,6 @@ H5Z_class2_t H5Z_NBIT[1] = {{
#define H5Z_NBIT_ORDER_BE 1 /* Big endian for datatype byte order */
/* Local variables */
-/*
- * cd_values_index: index of array cd_values inside function H5Z_set_local_nbit
- * cd_values_actual_nparms: number of parameters in array cd_values[]
- * need_not_compress: flag if TRUE indicating no need to do nbit compression
- * parms_index: index of array parms used by compression/decompression functions
- */
-static unsigned cd_values_index = 0;
-static size_t cd_values_actual_nparms = 0;
-static unsigned char need_not_compress = FALSE;
-static unsigned parms_index = 0;
/*-------------------------------------------------------------------------
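[The file-scope statics removed above (cd_values_index, cd_values_actual_nparms, need_not_compress, parms_index) carried the nbit filter's position and state between helper calls, which made H5Z_set_local_nbit() and the compression/decompression routines non-reentrant. The reworked prototypes thread that state through explicit pointer parameters instead. A simplified sketch of the new calling convention inside H5Z_set_local_nbit(), with the fixed header slots, allocation and error handling abbreviated:]

size_t cd_values_actual_nparms = 0;     /* plus whatever fixed slots the filter reserves */
unsigned cd_values_index = 0;           /* next free slot in cd_values[] */
hbool_t need_not_compress = FALSE;      /* TRUE if there is no need to do nbit compression */

/* sizing pass: each helper adds the number of slots it will later fill */
H5Z_calc_parms_atomic(&cd_values_actual_nparms);

/* ... allocate cd_values[] using cd_values_actual_nparms ... */

/* fill pass: each helper writes at *cd_values_index and advances it */
if(H5Z_set_parms_atomic(type, &cd_values_index, cd_values, &need_not_compress) < 0)
    HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "nbit cannot set parameters for datatype")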
@@ -166,13 +165,14 @@ done:
*
*-------------------------------------------------------------------------
*/
-static void H5Z_calc_parms_nooptype(void)
+static void
+H5Z_calc_parms_nooptype(size_t *cd_values_actual_nparms)
{
/* Store datatype class code */
- ++cd_values_actual_nparms;
+ *cd_values_actual_nparms += 1;
/* Store datatype size */
- ++cd_values_actual_nparms;
+ *cd_values_actual_nparms += 1;
}
@@ -190,22 +190,23 @@ static void H5Z_calc_parms_nooptype(void)
*
*-------------------------------------------------------------------------
*/
-static void H5Z_calc_parms_atomic(void)
+static void
+H5Z_calc_parms_atomic(size_t *cd_values_actual_nparms)
{
/* Store datatype class code */
- ++cd_values_actual_nparms;
+ *cd_values_actual_nparms += 1;
/* Store datatype size */
- ++cd_values_actual_nparms;
+ *cd_values_actual_nparms += 1;
/* Store datatype endianness */
- ++cd_values_actual_nparms;
+ *cd_values_actual_nparms += 1;
/* Store datatype's precision */
- ++cd_values_actual_nparms;
+ *cd_values_actual_nparms += 1;
/* Store datatype's offset */
- ++cd_values_actual_nparms;
+ *cd_values_actual_nparms += 1;
}
@@ -227,7 +228,7 @@ static void H5Z_calc_parms_atomic(void)
*-------------------------------------------------------------------------
*/
static herr_t
-H5Z_calc_parms_array(const H5T_t *type)
+H5Z_calc_parms_array(const H5T_t *type, size_t *cd_values_actual_nparms)
{
H5T_t *dtype_base = NULL; /* Array datatype's base datatype */
H5T_class_t dtype_base_class; /* Array datatype's base datatype's class */
@@ -236,10 +237,10 @@ H5Z_calc_parms_array(const H5T_t *type)
FUNC_ENTER_NOAPI_NOINIT
/* Store datatype class code */
- ++cd_values_actual_nparms;
+ *cd_values_actual_nparms += 1;
/* Store array datatype's size */
- ++cd_values_actual_nparms;
+ *cd_values_actual_nparms += 1;
/* Get array datatype's base datatype */
if(NULL == (dtype_base = H5T_get_super(type)))
@@ -253,16 +254,16 @@ H5Z_calc_parms_array(const H5T_t *type)
switch(dtype_base_class) {
case H5T_INTEGER:
case H5T_FLOAT:
- H5Z_calc_parms_atomic();
+ H5Z_calc_parms_atomic(cd_values_actual_nparms);
break;
case H5T_ARRAY:
- if(H5Z_calc_parms_array(dtype_base) == FAIL)
+ if(H5Z_calc_parms_array(dtype_base, cd_values_actual_nparms) == FAIL)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "nbit cannot compute parameters for datatype")
break;
case H5T_COMPOUND:
- if(H5Z_calc_parms_compound(dtype_base) == FAIL)
+ if(H5Z_calc_parms_compound(dtype_base, cd_values_actual_nparms) == FAIL)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "nbit cannot compute parameters for datatype")
break;
@@ -274,7 +275,7 @@ H5Z_calc_parms_array(const H5T_t *type)
case H5T_ENUM:
case H5T_VLEN:
/* Other datatype classes: nbit does no compression */
- H5Z_calc_parms_nooptype();
+ H5Z_calc_parms_nooptype(cd_values_actual_nparms);
break;
case H5T_NO_CLASS:
@@ -312,31 +313,32 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5Z_calc_parms_compound(const H5T_t *type)
+H5Z_calc_parms_compound(const H5T_t *type, size_t *cd_values_actual_nparms)
{
int nmembers; /* Compound datatype's number of members */
H5T_t *dtype_member = NULL; /* Compound datatype's member datatype */
- H5T_class_t dtype_member_class; /* Compound datatype's member datatype's class */
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
/* Store compound datatype class code */
- ++cd_values_actual_nparms;
+ *cd_values_actual_nparms += 1;
/* Store compound datatype's size */
- ++cd_values_actual_nparms;
+ *cd_values_actual_nparms += 1;
/* Get number of members */
if((nmembers = H5T_get_nmembers(type)) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "bad datatype number of members")
/* Store number of members */
- ++cd_values_actual_nparms;
+ *cd_values_actual_nparms += 1;
/* For each member, calculate parameters */
for(u = 0; u < (unsigned)nmembers; u++) {
+ H5T_class_t dtype_member_class; /* Compound datatype's member datatype's class */
+
/* Get member datatype */
if(NULL == (dtype_member = H5T_get_member_type(type, u, H5T_COPY_TRANSIENT)))
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "bad member datatype")
@@ -346,22 +348,22 @@ H5Z_calc_parms_compound(const H5T_t *type)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "bad member datatype class")
/* Store member offset */
- ++cd_values_actual_nparms;
+ *cd_values_actual_nparms += 1;
/* Calculate parameters according to member's datatype class */
switch(dtype_member_class) {
case H5T_INTEGER:
case H5T_FLOAT:
- H5Z_calc_parms_atomic();
+ H5Z_calc_parms_atomic(cd_values_actual_nparms);
break;
case H5T_ARRAY:
- if(H5Z_calc_parms_array(dtype_member) == FAIL)
+ if(H5Z_calc_parms_array(dtype_member, cd_values_actual_nparms) == FAIL)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "nbit cannot compute parameters for datatype")
break;
case H5T_COMPOUND:
- if(H5Z_calc_parms_compound(dtype_member) == FAIL)
+ if(H5Z_calc_parms_compound(dtype_member, cd_values_actual_nparms) == FAIL)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "nbit cannot compute parameters for datatype")
break;
@@ -373,7 +375,7 @@ H5Z_calc_parms_compound(const H5T_t *type)
case H5T_ENUM:
case H5T_VLEN:
/* Other datatype classes: nbit does no compression */
- H5Z_calc_parms_nooptype();
+ H5Z_calc_parms_nooptype(cd_values_actual_nparms);
break;
case H5T_NO_CLASS:
@@ -418,7 +420,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5Z_set_parms_nooptype(const H5T_t *type, unsigned cd_values[])
+H5Z_set_parms_nooptype(const H5T_t *type, unsigned *cd_values_index, unsigned cd_values[])
{
size_t dtype_size; /* No-op datatype's size (in bytes) */
herr_t ret_value = SUCCEED; /* Return value */
@@ -426,14 +428,15 @@ H5Z_set_parms_nooptype(const H5T_t *type, unsigned cd_values[])
FUNC_ENTER_NOAPI_NOINIT
/* Set datatype class code */
- cd_values[cd_values_index++] = H5Z_NBIT_NOOPTYPE;
+ cd_values[(*cd_values_index)++] = H5Z_NBIT_NOOPTYPE;
/* Get datatype's size */
if((dtype_size = H5T_get_size(type)) == 0)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "bad datatype size")
/* Set "local" parameter for datatype size */
- cd_values[cd_values_index++] = dtype_size;
+ H5_CHECK_OVERFLOW(dtype_size, size_t, unsigned);
+ cd_values[(*cd_values_index)++] = (unsigned)dtype_size;
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -457,25 +460,28 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5Z_set_parms_atomic(const H5T_t *type, unsigned cd_values[])
+H5Z_set_parms_atomic(const H5T_t *type, unsigned *cd_values_index,
+ unsigned cd_values[], hbool_t *need_not_compress)
{
H5T_order_t dtype_order; /* Atomic datatype's endianness order */
size_t dtype_size; /* Atomic datatype's size (in bytes) */
size_t dtype_precision; /* Atomic datatype's precision (in bits) */
- int dtype_offset; /* Atomic datatype's offset (in bits) */
+ int sdtype_offset; /* Atomic datatype's offset (in bits), as the signed value returned by H5T_get_offset */
+ unsigned dtype_offset; /* Atomic datatype's offset (in bits) */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
/* Set datatype class code */
- cd_values[cd_values_index++] = H5Z_NBIT_ATOMIC;
+ cd_values[(*cd_values_index)++] = H5Z_NBIT_ATOMIC;
/* Get datatype's size */
if((dtype_size = H5T_get_size(type)) == 0)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "bad datatype size")
/* Set "local" parameter for datatype size */
- cd_values[cd_values_index++] = dtype_size;
+ H5_CHECK_OVERFLOW(dtype_size, size_t, unsigned);
+ cd_values[(*cd_values_index)++] = (unsigned)dtype_size;
/* Get datatype's endianness order */
if((dtype_order = H5T_get_order(type)) == H5T_ORDER_ERROR)
@@ -484,11 +490,11 @@ H5Z_set_parms_atomic(const H5T_t *type, unsigned cd_values[])
/* Set "local" parameter for datatype endianness */
switch(dtype_order) {
case H5T_ORDER_LE: /* Little-endian byte order */
- cd_values[cd_values_index++] = H5Z_NBIT_ORDER_LE;
+ cd_values[(*cd_values_index)++] = H5Z_NBIT_ORDER_LE;
break;
case H5T_ORDER_BE: /* Big-endian byte order */
- cd_values[cd_values_index++] = H5Z_NBIT_ORDER_BE;
+ cd_values[(*cd_values_index)++] = H5Z_NBIT_ORDER_BE;
break;
case H5T_ORDER_VAX:
@@ -504,27 +510,28 @@ H5Z_set_parms_atomic(const H5T_t *type, unsigned cd_values[])
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "bad datatype precision")
/* Get datatype's offset */
- if((dtype_offset = H5T_get_offset(type)) < 0)
+ if((sdtype_offset = H5T_get_offset(type)) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "bad datatype offset")
+ dtype_offset = (unsigned)sdtype_offset;
/* Check values of precision and offset */
- if(dtype_precision > dtype_size * 8 || (dtype_precision + dtype_offset) > dtype_size * 8
- || dtype_precision <= 0 || dtype_offset < 0)
+ if(dtype_precision > dtype_size * 8 || (dtype_precision + dtype_offset) > dtype_size * 8)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "invalid datatype precision/offset")
/* Set "local" parameter for datatype precision */
- cd_values[cd_values_index++] = dtype_precision;
+ H5_CHECK_OVERFLOW(dtype_precision, size_t, unsigned);
+ cd_values[(*cd_values_index)++] = (unsigned)dtype_precision;
/* Set "local" parameter for datatype offset */
- cd_values[cd_values_index++] = dtype_offset;
+ cd_values[(*cd_values_index)++] = dtype_offset;
/* If before this point, there is no need to compress, check the need to
* compress at this point. If current datatype is not full-precision,
* flag need_not_compress should be set to FALSE.
*/
- if(need_not_compress) /* so far no need to compress */
+ if(*need_not_compress) /* so far no need to compress */
if(dtype_offset != 0 || dtype_precision != dtype_size * 8)
- need_not_compress = FALSE;
+ *need_not_compress = FALSE;
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5Z_set_parms_atomic() */
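
Several of the changed assignments above pair an explicit (unsigned) cast with H5_CHECK_OVERFLOW before storing a size_t into the unsigned cd_values[] array. H5_CHECK_OVERFLOW is HDF5-internal; assuming it acts as a debug-build range assertion, the pattern reduces to the following sketch, with a plain assert standing in for the macro:

#include <assert.h>
#include <limits.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative only: mimics the "verify, then cast" pattern used with
 * H5_CHECK_OVERFLOW before narrowing a size_t into an unsigned slot. */
static unsigned
narrow_to_unsigned(size_t value)
{
    assert(value <= UINT_MAX);      /* debug-time overflow check */
    return (unsigned)value;         /* explicit, warning-free cast */
}

int
main(void)
{
    unsigned cd_values[4];
    unsigned idx = 0;
    size_t dtype_size = 8;          /* e.g. sizeof(double) */

    cd_values[idx++] = narrow_to_unsigned(dtype_size);
    printf("stored size: %u\n", cd_values[0]);
    return 0;
}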
@@ -547,7 +554,8 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5Z_set_parms_array(const H5T_t *type, unsigned cd_values[])
+H5Z_set_parms_array(const H5T_t *type, unsigned *cd_values_index,
+ unsigned cd_values[], hbool_t *need_not_compress)
{
H5T_t *dtype_base = NULL; /* Array datatype's base datatype */
H5T_class_t dtype_base_class; /* Array datatype's base datatype's class */
@@ -558,14 +566,15 @@ H5Z_set_parms_array(const H5T_t *type, unsigned cd_values[])
FUNC_ENTER_NOAPI_NOINIT
/* Set datatype class code */
- cd_values[cd_values_index++] = H5Z_NBIT_ARRAY;
+ cd_values[(*cd_values_index)++] = H5Z_NBIT_ARRAY;
/* Get array datatype's size */
if((dtype_size = H5T_get_size(type)) == 0)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "bad datatype size")
/* Set "local" parameter for array datatype's size */
- cd_values[cd_values_index++]=dtype_size;
+ H5_CHECK_OVERFLOW(dtype_size, size_t, unsigned);
+ cd_values[(*cd_values_index)++] = (unsigned)dtype_size;
/* Get array datatype's base datatype */
if(NULL == (dtype_base = H5T_get_super(type)))
@@ -579,17 +588,17 @@ H5Z_set_parms_array(const H5T_t *type, unsigned cd_values[])
switch(dtype_base_class) {
case H5T_INTEGER:
case H5T_FLOAT:
- if(H5Z_set_parms_atomic(dtype_base, cd_values) == FAIL)
+ if(H5Z_set_parms_atomic(dtype_base, cd_values_index, cd_values, need_not_compress) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "nbit cannot set parameters for datatype")
break;
case H5T_ARRAY:
- if(H5Z_set_parms_array(dtype_base, cd_values) == FAIL)
+ if(H5Z_set_parms_array(dtype_base, cd_values_index, cd_values, need_not_compress) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "nbit cannot set parameters for datatype")
break;
case H5T_COMPOUND:
- if(H5Z_set_parms_compound(dtype_base, cd_values) == FAIL)
+ if(H5Z_set_parms_compound(dtype_base, cd_values_index, cd_values, need_not_compress) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "nbit cannot set parameters for datatype")
break;
@@ -602,7 +611,7 @@ H5Z_set_parms_array(const H5T_t *type, unsigned cd_values[])
if(dtype_base_class == H5T_VLEN || is_vlstring)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "datatype not supported by nbit")
- if(H5Z_set_parms_nooptype(dtype_base, cd_values) == FAIL)
+ if(H5Z_set_parms_nooptype(dtype_base, cd_values_index, cd_values) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "nbit cannot set parameters for datatype")
break;
@@ -612,7 +621,7 @@ H5Z_set_parms_array(const H5T_t *type, unsigned cd_values[])
case H5T_OPAQUE:
case H5T_REFERENCE:
case H5T_ENUM:
- if(H5Z_set_parms_nooptype(dtype_base, cd_values) == FAIL)
+ if(H5Z_set_parms_nooptype(dtype_base, cd_values_index, cd_values) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "nbit cannot set parameters for datatype")
break;
@@ -650,9 +659,11 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5Z_set_parms_compound(const H5T_t *type, unsigned cd_values[])
+H5Z_set_parms_compound(const H5T_t *type, unsigned *cd_values_index,
+ unsigned cd_values[], hbool_t *need_not_compress)
{
- int nmembers; /* Compound datatype's number of members */
+ int snmembers; /* Compound datatype's number of members, as the signed value returned by H5T_get_nmembers */
+ unsigned nmembers; /* Compound datatype's number of members */
H5T_t *dtype_member = NULL; /* Compound datatype's member datatype */
H5T_class_t dtype_member_class; /* Compound datatype's member datatype's class */
size_t dtype_member_offset; /* Compound datatype's current member datatype's offset (in bytes) */
@@ -665,24 +676,26 @@ H5Z_set_parms_compound(const H5T_t *type, unsigned cd_values[])
FUNC_ENTER_NOAPI_NOINIT
/* Set "local" parameter for compound datatype class code */
- cd_values[cd_values_index++] = H5Z_NBIT_COMPOUND;
+ cd_values[(*cd_values_index)++] = H5Z_NBIT_COMPOUND;
/* Get datatype's size */
if((dtype_size = H5T_get_size(type)) == 0)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "bad datatype size")
/* Set "local" parameter for compound datatype size */
- cd_values[cd_values_index++] = dtype_size;
+ H5_CHECK_OVERFLOW(dtype_size, size_t, unsigned);
+ cd_values[(*cd_values_index)++] = (unsigned)dtype_size;
/* Get number of members */
- if((nmembers = H5T_get_nmembers(type)) < 0)
+ if((snmembers = H5T_get_nmembers(type)) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "bad datatype number of members")
+ nmembers = (unsigned)snmembers;
/* Set "local" parameter for number of members */
- cd_values[cd_values_index++] = nmembers;
+ cd_values[(*cd_values_index)++] = nmembers;
/* For each member, set parameters */
- for(u = 0; u < (unsigned)nmembers; u++) {
+ for(u = 0; u < nmembers; u++) {
/* Get member datatype */
if(NULL == (dtype_member = H5T_get_member_type(type, u, H5T_COPY_TRANSIENT)))
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "bad member datatype")
@@ -695,23 +708,24 @@ H5Z_set_parms_compound(const H5T_t *type, unsigned cd_values[])
dtype_member_offset = H5T_get_member_offset(type, u);
/* Set "local" parameter for member offset */
- cd_values[cd_values_index++] = dtype_member_offset;
+ H5_CHECK_OVERFLOW(dtype_member_offset, size_t, unsigned);
+ cd_values[(*cd_values_index)++] = (unsigned)dtype_member_offset;
/* Call appropriate function according to member's datatype class */
switch(dtype_member_class) {
case H5T_INTEGER:
case H5T_FLOAT:
- if(H5Z_set_parms_atomic(dtype_member, cd_values) == FAIL)
+ if(H5Z_set_parms_atomic(dtype_member, cd_values_index, cd_values, need_not_compress) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "nbit cannot set parameters for datatype")
break;
case H5T_ARRAY:
- if(H5Z_set_parms_array(dtype_member, cd_values) == FAIL)
+ if(H5Z_set_parms_array(dtype_member, cd_values_index, cd_values, need_not_compress) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "nbit cannot set parameters for datatype")
break;
case H5T_COMPOUND:
- if(H5Z_set_parms_compound(dtype_member, cd_values) == FAIL)
+ if(H5Z_set_parms_compound(dtype_member, cd_values_index, cd_values, need_not_compress) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "nbit cannot set parameters for datatype")
break;
@@ -721,22 +735,24 @@ H5Z_set_parms_compound(const H5T_t *type, unsigned cd_values[])
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "cannot determine if datatype is a variable-length string")
/* Because for some no-op datatype (VL datatype and VL string datatype), its
- * size can not be retrieved correctly by using function call H5T_get_size,
- * special handling is needed for getting the size. Here the difference between
+ * size can not be retrieved correctly by using function call H5T_get_size,
+ * special handling is needed for getting the size. Here the difference between
* adjacent member offset is used (if alignment is present, the result can be
- * larger, but it does not affect the nbit filter's correctness).
+ * larger, but it does not affect the nbit filter's correctness).
*/
if(dtype_member_class == H5T_VLEN || is_vlstring) {
/* Set datatype class code */
- cd_values[cd_values_index++] = H5Z_NBIT_NOOPTYPE;
+ cd_values[(*cd_values_index)++] = H5Z_NBIT_NOOPTYPE;
- if(u != (unsigned)nmembers - 1)
+ if(u != nmembers - 1)
dtype_next_member_offset = H5T_get_member_offset(type, u + 1);
else /* current member is the last member */
dtype_next_member_offset = dtype_size;
/* Set "local" parameter for datatype size */
- cd_values[cd_values_index++] = dtype_next_member_offset - dtype_member_offset;
+ H5_CHECK_OVERFLOW(dtype_member_offset, size_t, unsigned);
+ H5_CHECK_OVERFLOW(dtype_next_member_offset, size_t, unsigned);
+ cd_values[(*cd_values_index)++] = (unsigned)dtype_next_member_offset - (unsigned)dtype_member_offset;
}
break;
@@ -747,7 +763,7 @@ H5Z_set_parms_compound(const H5T_t *type, unsigned cd_values[])
case H5T_REFERENCE:
case H5T_ENUM:
/* other datatype that nbit does no compression */
- if(H5Z_set_parms_nooptype(dtype_member, cd_values) == FAIL)
+ if(H5Z_set_parms_nooptype(dtype_member, cd_values_index, cd_values) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "nbit cannot set parameters for datatype")
break;
@@ -796,10 +812,13 @@ H5Z_set_local_nbit(hid_t dcpl_id, hid_t type_id, hid_t space_id)
const H5T_t *type; /* Datatype */
const H5S_t *ds; /* Dataspace */
unsigned flags; /* Filter flags */
+ unsigned cd_values_index; /* Index of array cd_values */
+ size_t cd_values_actual_nparms; /* Number of parameters in array cd_values[] */
size_t cd_nelmts = H5Z_NBIT_USER_NPARMS; /* Number of filter parameters */
unsigned *cd_values = NULL; /* Filter parameters */
hssize_t npoints; /* Number of points in the dataspace */
H5T_class_t dtype_class; /* Datatype's class */
+ hbool_t need_not_compress; /* TRUE if the datatype is full-precision, so nbit compression is unnecessary */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
@@ -822,16 +841,16 @@ H5Z_set_local_nbit(hid_t dcpl_id, hid_t type_id, hid_t space_id)
switch(dtype_class) {
case H5T_INTEGER:
case H5T_FLOAT:
- H5Z_calc_parms_atomic();
+ H5Z_calc_parms_atomic(&cd_values_actual_nparms);
break;
case H5T_ARRAY:
- if(H5Z_calc_parms_array(type) == FAIL)
+ if(H5Z_calc_parms_array(type, &cd_values_actual_nparms) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "nbit cannot compute parameters for datatype")
break;
case H5T_COMPOUND:
- if(H5Z_calc_parms_compound(type) == FAIL)
+ if(H5Z_calc_parms_compound(type, &cd_values_actual_nparms) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "nbit cannot compute parameters for datatype")
break;
@@ -882,7 +901,8 @@ H5Z_set_local_nbit(hid_t dcpl_id, hid_t type_id, hid_t space_id)
cd_values_index = 2;
/* Set "local" parameter for number of elements in the chunk */
- H5_CHECKED_ASSIGN(cd_values[cd_values_index++], unsigned, npoints, hssize_t);
+ H5_CHECK_OVERFLOW(npoints, hssize_t, unsigned);
+ cd_values[cd_values_index++] = (unsigned)npoints;
/* Assume no need to compress now, will be changed to FALSE later if not */
need_not_compress = TRUE;
@@ -891,17 +911,17 @@ H5Z_set_local_nbit(hid_t dcpl_id, hid_t type_id, hid_t space_id)
switch(dtype_class) {
case H5T_INTEGER:
case H5T_FLOAT:
- if(H5Z_set_parms_atomic(type, cd_values) < 0)
+ if(H5Z_set_parms_atomic(type, &cd_values_index, cd_values, &need_not_compress) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "nbit cannot set parameters for datatype")
break;
case H5T_ARRAY:
- if(H5Z_set_parms_array(type, cd_values) < 0)
+ if(H5Z_set_parms_array(type, &cd_values_index, cd_values, &need_not_compress) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "nbit cannot set parameters for datatype")
break;
case H5T_COMPOUND:
- if(H5Z_set_parms_compound(type, cd_values) < 0)
+ if(H5Z_set_parms_compound(type, &cd_values_index, cd_values, &need_not_compress) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "nbit cannot set parameters for datatype")
break;
@@ -927,8 +947,9 @@ H5Z_set_local_nbit(hid_t dcpl_id, hid_t type_id, hid_t space_id)
HDassert(cd_values_actual_nparms == cd_values_index);
/* Finally set the first two entries of cd_values[] */
- cd_values[0] = cd_values_actual_nparms;
- cd_values[1] = need_not_compress;
+ H5_CHECK_OVERFLOW(cd_values_actual_nparms, size_t, unsigned);
+ cd_values[0] = (unsigned)cd_values_actual_nparms;
+ cd_values[1] = (unsigned)need_not_compress;
/* Modify the filter's parameters for this dataset */
if(H5P_modify_filter(dcpl_plist, H5Z_FILTER_NBIT, flags, cd_values_actual_nparms, cd_values) < 0)
@@ -957,7 +978,7 @@ done:
*/
static size_t
H5Z_filter_nbit(unsigned flags, size_t cd_nelmts, const unsigned cd_values[],
- size_t nbytes, size_t *buf_size, void **buf)
+ size_t nbytes, size_t *buf_size, void **buf)
{
unsigned char *outbuf; /* pointer to new output buffer */
size_t size_out = 0; /* size of output buffer */
@@ -1027,63 +1048,64 @@ done:
*/
static void
-H5Z_nbit_next_byte(size_t *j, int *buf_len)
+H5Z_nbit_next_byte(size_t *j, size_t *buf_len)
{
++(*j);
*buf_len = 8 * sizeof(unsigned char);
}
static void
-H5Z_nbit_decompress_one_byte(unsigned char *data, size_t data_offset, int k,
- int begin_i, int end_i, unsigned char *buffer, size_t *j, int *buf_len,
- parms_atomic p, int datatype_len)
+H5Z_nbit_decompress_one_byte(unsigned char *data, size_t data_offset, unsigned k,
+ unsigned begin_i, unsigned end_i, unsigned char *buffer, size_t *j, size_t *buf_len,
+ const parms_atomic *p, size_t datatype_len)
{
- int dat_len; /* dat_len is the number of bits to be copied in each data byte */
- int uchar_offset;
+ size_t dat_len; /* dat_len is the number of bits to be copied in each data byte */
+ size_t dat_offset;
unsigned char val; /* value to be copied in each data byte */
/* initialize value and bits of unsigned char to be copied */
val = buffer[*j];
- uchar_offset = 0;
+ dat_offset = 0;
if(begin_i != end_i) { /* significant bits occupy >1 unsigned char */
if(k == begin_i)
- dat_len = 8 - (datatype_len - p.precision - p.offset) % 8;
+ dat_len = 8 - (datatype_len - p->precision - p->offset) % 8;
else if(k == end_i) {
- dat_len = 8 - p.offset %8;
- uchar_offset = 8 - dat_len;
+ dat_len = 8 - p->offset % 8;
+ dat_offset = 8 - dat_len;
}
else
dat_len = 8;
} else { /* all significant bits in one unsigned char */
- uchar_offset = p.offset % 8;
- dat_len = p.precision;
+ dat_offset = p->offset % 8;
+ dat_len = p->precision;
}
if(*buf_len > dat_len) {
- data[data_offset + k] =
- ((val >> (*buf_len - dat_len)) & ~(~0 << dat_len)) << uchar_offset;
+ data[data_offset + k] = (unsigned char)(
+ ((unsigned)(val >> (*buf_len - dat_len)) & (unsigned)(~((unsigned)(~0) << dat_len))) << dat_offset);
*buf_len -= dat_len;
} else {
- data[data_offset + k] =
- ((val & ~(~0 << *buf_len)) << (dat_len - *buf_len)) << uchar_offset;
+ data[data_offset + k] = (unsigned char)(
+ ((val & ~((unsigned)(~0) << *buf_len)) << (dat_len - *buf_len)) << dat_offset);
dat_len -= *buf_len;
H5Z_nbit_next_byte(j, buf_len);
- if(dat_len == 0) return;
+ if(dat_len == 0)
+ return;
val = buffer[*j];
- data[data_offset + k] |=
- ((val >> (*buf_len - dat_len)) & ~(~0 << dat_len)) << uchar_offset;
+ data[data_offset + k] |= (unsigned char)(
+ ((unsigned)(val >> (*buf_len - dat_len)) & (unsigned)(~((unsigned)(~0) << dat_len))) << dat_offset);
*buf_len -= dat_len;
}
}
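
The rewritten byte-copy above keeps the original shift-and-mask arithmetic but performs it in unsigned math and casts the result back to unsigned char, avoiding sign-extension surprises from ~0. A worked, self-contained example of the core expression (values chosen for illustration):

#include <stdio.h>

/* Worked example of the mask/shift used by the one-byte copy helpers:
 * take `dat_len` bits from `val`, of which `buf_len` bits are still
 * unread, and place them at bit offset `dat_offset` in the output. */
int
main(void)
{
    unsigned char val = 0xB6;   /* 1011 0110                        */
    size_t buf_len = 8;         /* bits still unread in this byte   */
    size_t dat_len = 3;         /* bits we want to copy             */
    size_t dat_offset = 2;      /* destination bit offset           */
    unsigned char out;

    /* shift wanted bits to the bottom, mask to dat_len bits, then
     * move them to the destination offset -- all in unsigned math   */
    out = (unsigned char)(((unsigned)(val >> (buf_len - dat_len)) &
                           (unsigned)~((unsigned)~0 << dat_len)) << dat_offset);

    printf("0x%02X\n", out);    /* top 3 bits of val are 101 -> 0x14 */
    return 0;
}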
static void
H5Z_nbit_decompress_one_nooptype(unsigned char *data, size_t data_offset,
- unsigned char *buffer, size_t *j, int *buf_len, unsigned size)
+ unsigned char *buffer, size_t *j, size_t *buf_len, unsigned size)
{
unsigned i; /* index */
- unsigned dat_len; /* dat_len is the number of bits to be copied in each data byte */
+ size_t dat_len; /* dat_len is the number of bits to be copied in each data byte */
unsigned char val; /* value to be copied in each data byte */
for(i = 0; i < size; i++) {
@@ -1091,99 +1113,109 @@ H5Z_nbit_decompress_one_nooptype(unsigned char *data, size_t data_offset,
val = buffer[*j];
dat_len = sizeof(unsigned char) * 8;
- data[data_offset + i] = ((val & ~(~0 << *buf_len)) << (dat_len - *buf_len));
+ data[data_offset + i] = (unsigned char)(((val & ~((unsigned)(~0) << *buf_len)) << (dat_len - *buf_len)));
dat_len -= *buf_len;
H5Z_nbit_next_byte(j, buf_len);
- if(dat_len == 0) continue;
+ if(dat_len == 0)
+ continue;
val = buffer[*j];
- data[data_offset + i] |= ((val >> (*buf_len - dat_len)) & ~(~0 << dat_len));
+ data[data_offset + i] |= (unsigned char)((unsigned)(val >> (*buf_len - dat_len)) & (unsigned)(~((unsigned)(~0) << dat_len)));
*buf_len -= dat_len;
}
}
static void
H5Z_nbit_decompress_one_atomic(unsigned char *data, size_t data_offset,
- unsigned char *buffer, size_t *j, int *buf_len, parms_atomic p)
+ unsigned char *buffer, size_t *j, size_t *buf_len, const parms_atomic *p)
{
- /* begin_i: the index of byte having first significant bit
- end_i: the index of byte having last significant bit */
- int k, begin_i, end_i, datatype_len;
-
- datatype_len = p.size * 8;
-
- if(p.order == H5Z_NBIT_ORDER_LE) { /* little endian */
- /* calculate begin_i and end_i */
- if((p.precision + p.offset) % 8 != 0)
- begin_i = (p.precision + p.offset) / 8;
- else
- begin_i = (p.precision + p.offset) / 8 - 1;
- end_i = p.offset / 8;
-
- for(k = begin_i; k >= end_i; k--)
- H5Z_nbit_decompress_one_byte(data, data_offset, k, begin_i, end_i,
- buffer, j, buf_len, p, datatype_len);
- }
-
- if(p.order == H5Z_NBIT_ORDER_BE) { /* big endian */
- /* calculate begin_i and end_i */
- begin_i = (datatype_len - p.precision - p.offset) / 8;
- if(p.offset % 8 != 0)
- end_i = (datatype_len - p.offset) / 8;
- else
- end_i = (datatype_len - p.offset) / 8 - 1;
-
- for(k = begin_i; k <= end_i; k++)
- H5Z_nbit_decompress_one_byte(data, data_offset, k, begin_i, end_i,
- buffer, j, buf_len, p, datatype_len);
- }
+ /* begin_i: the index of byte having first significant bit
+ end_i: the index of byte having last significant bit */
+ int k;
+ unsigned begin_i, end_i;
+ size_t datatype_len;
+
+ datatype_len = p->size * 8;
+
+ if(p->order == H5Z_NBIT_ORDER_LE) { /* little endian */
+ /* calculate begin_i and end_i */
+ if((p->precision + p->offset) % 8 != 0)
+ begin_i = (p->precision + p->offset) / 8;
+ else
+ begin_i = (p->precision + p->offset) / 8 - 1;
+ end_i = p->offset / 8;
+
+ for(k = (int)begin_i; k >= (int)end_i; k--)
+ H5Z_nbit_decompress_one_byte(data, data_offset, (unsigned)k, begin_i, end_i,
+ buffer, j, buf_len, p, datatype_len);
+ }
+ else { /* big endian */
+ /* Sanity check */
+ HDassert(p->order == H5Z_NBIT_ORDER_BE);
+
+ /* calculate begin_i and end_i */
+ begin_i = ((unsigned)datatype_len - p->precision - p->offset) / 8;
+ if(p->offset % 8 != 0)
+ end_i = ((unsigned)datatype_len - p->offset) / 8;
+ else
+ end_i = ((unsigned)datatype_len - p->offset) / 8 - 1;
+
+ for(k = (int)begin_i; k <= (int)end_i; k++)
+ H5Z_nbit_decompress_one_byte(data, data_offset, (unsigned)k, begin_i, end_i,
+ buffer, j, buf_len, p, datatype_len);
+ }
}
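
For the little-endian branch above, begin_i is the index of the byte holding the most significant used bit and end_i the byte holding bit `offset`; the loop then walks from begin_i down to end_i. Evaluating the same arithmetic for one assumed case (precision 12, offset 3):

#include <stdio.h>

/* Little-endian index calculation from the atomic decompress/compress
 * helpers, evaluated for one concrete case (precision 12, offset 3). */
int
main(void)
{
    unsigned precision = 12, offset = 3;
    unsigned begin_i, end_i;

    if((precision + offset) % 8 != 0)
        begin_i = (precision + offset) / 8;
    else
        begin_i = (precision + offset) / 8 - 1;
    end_i = offset / 8;

    /* bytes begin_i .. end_i (1 .. 0 here) hold the significant bits */
    printf("begin_i = %u, end_i = %u\n", begin_i, end_i);
    return 0;
}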
static void
H5Z_nbit_decompress_one_array(unsigned char *data, size_t data_offset,
- unsigned char *buffer, size_t *j, int *buf_len, const unsigned parms[])
+ unsigned char *buffer, size_t *j, size_t *buf_len, const unsigned parms[],
+ unsigned *parms_index)
{
unsigned i, total_size, base_class, base_size, n, begin_index;
parms_atomic p;
- total_size = parms[parms_index++];
- base_class = parms[parms_index++];
+ total_size = parms[(*parms_index)++];
+ base_class = parms[(*parms_index)++];
switch(base_class) {
case H5Z_NBIT_ATOMIC:
- p.size = parms[parms_index++];
- p.order = parms[parms_index++];
- p.precision = parms[parms_index++];
- p.offset = parms[parms_index++];
- n = total_size/p.size;
+ p.size = parms[(*parms_index)++];
+ p.order = parms[(*parms_index)++];
+ p.precision = parms[(*parms_index)++];
+ p.offset = parms[(*parms_index)++];
+ n = total_size / p.size;
for(i = 0; i < n; i++)
- H5Z_nbit_decompress_one_atomic(data, data_offset + i*p.size,
- buffer, j, buf_len, p);
+ H5Z_nbit_decompress_one_atomic(data, data_offset + i * p.size,
+ buffer, j, buf_len, &p);
break;
+
case H5Z_NBIT_ARRAY:
- base_size = parms[parms_index]; /* read in advance */
- n = total_size/base_size; /* number of base_type elements inside the array datatype */
- begin_index = parms_index;
+ base_size = parms[*parms_index]; /* read in advance */
+ n = total_size / base_size; /* number of base_type elements inside the array datatype */
+ begin_index = *parms_index;
for(i = 0; i < n; i++) {
- H5Z_nbit_decompress_one_array(data, data_offset + i*base_size,
- buffer, j, buf_len, parms);
- parms_index = begin_index;
+ H5Z_nbit_decompress_one_array(data, data_offset + i * base_size,
+ buffer, j, buf_len, parms, parms_index);
+ *parms_index = begin_index;
}
break;
+
case H5Z_NBIT_COMPOUND:
- base_size = parms[parms_index]; /* read in advance */
- n = total_size/base_size; /* number of base_type elements inside the array datatype */
- begin_index = parms_index;
+ base_size = parms[*parms_index]; /* read in advance */
+ n = total_size / base_size; /* number of base_type elements inside the array datatype */
+ begin_index = *parms_index;
for(i = 0; i < n; i++) {
- H5Z_nbit_decompress_one_compound(data, data_offset + i*base_size,
- buffer, j, buf_len, parms);
- parms_index = begin_index;
+ H5Z_nbit_decompress_one_compound(data, data_offset + i * base_size,
+ buffer, j, buf_len, parms, parms_index);
+ *parms_index = begin_index;
}
break;
+
case H5Z_NBIT_NOOPTYPE:
- parms_index++; /* skip size of no-op type */
+ (*parms_index)++; /* skip size of no-op type */
H5Z_nbit_decompress_one_nooptype(data, data_offset, buffer, j, buf_len, total_size);
break;
+
default:
HDassert(0 && "This Should never be executed!");
} /* end switch */
@@ -1191,39 +1223,44 @@ H5Z_nbit_decompress_one_array(unsigned char *data, size_t data_offset,
static void
H5Z_nbit_decompress_one_compound(unsigned char *data, size_t data_offset,
- unsigned char *buffer, size_t *j, int *buf_len, const unsigned parms[])
+ unsigned char *buffer, size_t *j, size_t *buf_len, const unsigned parms[],
+ unsigned *parms_index)
{
unsigned i, nmembers, member_offset, member_class, size;
parms_atomic p;
- parms_index++; /* skip total size of compound datatype */
- nmembers = parms[parms_index++];
+ (*parms_index)++; /* skip total size of compound datatype */
+ nmembers = parms[(*parms_index)++];
for(i = 0; i < nmembers; i++) {
- member_offset = parms[parms_index++];
- member_class = parms[parms_index++];
+ member_offset = parms[(*parms_index)++];
+ member_class = parms[(*parms_index)++];
switch(member_class) {
case H5Z_NBIT_ATOMIC:
- p.size = parms[parms_index++];
- p.order = parms[parms_index++];
- p.precision = parms[parms_index++];
- p.offset = parms[parms_index++];
+ p.size = parms[(*parms_index)++];
+ p.order = parms[(*parms_index)++];
+ p.precision = parms[(*parms_index)++];
+ p.offset = parms[(*parms_index)++];
H5Z_nbit_decompress_one_atomic(data, data_offset + member_offset,
- buffer, j, buf_len, p);
+ buffer, j, buf_len, &p);
break;
+
case H5Z_NBIT_ARRAY:
H5Z_nbit_decompress_one_array(data, data_offset + member_offset,
- buffer, j, buf_len, parms);
+ buffer, j, buf_len, parms, parms_index);
break;
+
case H5Z_NBIT_COMPOUND:
H5Z_nbit_decompress_one_compound(data, data_offset+member_offset,
- buffer, j, buf_len, parms);
+ buffer, j, buf_len, parms, parms_index);
break;
+
case H5Z_NBIT_NOOPTYPE:
- size = parms[parms_index++];
+ size = parms[(*parms_index)++];
H5Z_nbit_decompress_one_nooptype(data, data_offset+member_offset,
buffer, j, buf_len, size);
break;
+
default:
HDassert(0 && "This Should never be executed!");
} /* end switch */
@@ -1232,94 +1269,101 @@ H5Z_nbit_decompress_one_compound(unsigned char *data, size_t data_offset,
static void
H5Z_nbit_decompress(unsigned char *data, unsigned d_nelmts, unsigned char *buffer,
- const unsigned parms[])
+ const unsigned parms[])
{
- /* i: index of data, j: index of buffer,
- buf_len: number of bits to be filled in current byte */
- size_t i, j, size;
- int buf_len;
- parms_atomic p;
+ /* i: index of data, j: index of buffer,
+ buf_len: number of bits to be filled in current byte */
+ unsigned i;
+ size_t j, size;
+ size_t buf_len;
+ parms_atomic p;
+ unsigned parms_index; /* index in array parms used by compression/decompression functions */
+
+ /* may not have to initialize to zeros */
+ HDmemset(data, 0, d_nelmts * parms[4]);
+
+ /* initialization before the loop */
+ j = 0;
+ buf_len = sizeof(unsigned char) * 8;
+
+ switch(parms[3]) {
+ case H5Z_NBIT_ATOMIC:
+ p.size = parms[4];
+ p.order = parms[5];
+ p.precision = parms[6];
+ p.offset = parms[7];
+ for(i = 0; i < d_nelmts; i++)
+ H5Z_nbit_decompress_one_atomic(data, i * p.size, buffer, &j, &buf_len, &p);
+ break;
- /* may not have to initialize to zeros */
- for(i = 0; i < d_nelmts*parms[4]; i++) data[i] = 0;
+ case H5Z_NBIT_ARRAY:
+ size = parms[4];
+ parms_index = 4; /* set the index before goto function call */
+ for(i = 0; i < d_nelmts; i++) {
+ H5Z_nbit_decompress_one_array(data, i * size, buffer, &j, &buf_len, parms, &parms_index);
+ parms_index = 4;
+ }
+ break;
- /* initialization before the loop */
- j = 0;
- buf_len = sizeof(unsigned char) * 8;
+ case H5Z_NBIT_COMPOUND:
+ size = parms[4];
+ parms_index = 4; /* set the index before goto function call */
+ for(i = 0; i < d_nelmts; i++) {
+ H5Z_nbit_decompress_one_compound(data, i * size, buffer, &j, &buf_len, parms, &parms_index);
+ parms_index = 4;
+ }
+ break;
- switch(parms[3]) {
- case H5Z_NBIT_ATOMIC:
- /* set the index before goto function call */
- p.size = parms[4];
- p.order = parms[5];
- p.precision = parms[6];
- p.offset = parms[7];
- for(i = 0; i < d_nelmts; i++) {
- H5Z_nbit_decompress_one_atomic(data, i*p.size, buffer, &j, &buf_len, p);
- }
- break;
- case H5Z_NBIT_ARRAY:
- size = parms[4];
- parms_index = 4;
- for(i = 0; i < d_nelmts; i++) {
- H5Z_nbit_decompress_one_array(data, i*size, buffer, &j, &buf_len, parms);
- parms_index = 4;
- }
- break;
- case H5Z_NBIT_COMPOUND:
- size = parms[4];
- parms_index = 4;
- for(i = 0; i < d_nelmts; i++) {
- H5Z_nbit_decompress_one_compound(data, i*size, buffer, &j, &buf_len, parms);
- parms_index = 4;
- }
- break;
- default:
- HDassert(0 && "This Should never be executed!");
- } /* end switch */
+ default:
+ HDassert(0 && "This Should never be executed!");
+ } /* end switch */
}
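
As the decompress entry point above shows, parms[] (the filter's cd_values[]) carries a small header -- [0] parameter count, [1] the need-not-compress flag, [2] elements per chunk, [3] the top-level class code -- followed by the per-class description starting at index 4. A hypothetical header for a little-endian atomic type (the NBIT_* codes below are made-up stand-ins, not the real H5Z_NBIT_* constants):

#include <stdio.h>

#define NBIT_ATOMIC   1u   /* illustrative stand-ins for the       */
#define NBIT_ORDER_LE 0u   /* H5Z_NBIT_* codes; values are made up */

int
main(void)
{
    /* Header layout used by the nbit filter (indices from the diff):
     * [0] total parameter count   [1] "no need to compress" flag
     * [2] elements per chunk      [3] datatype class code
     * [4..] per-class description (size, order, precision, offset)  */
    unsigned cd_values[8];

    cd_values[0] = 8;              /* nparms                      */
    cd_values[1] = 0;              /* compression is needed       */
    cd_values[2] = 1024;           /* chunk elements              */
    cd_values[3] = NBIT_ATOMIC;
    cd_values[4] = 4;              /* datatype size in bytes      */
    cd_values[5] = NBIT_ORDER_LE;
    cd_values[6] = 12;             /* precision in bits           */
    cd_values[7] = 3;              /* offset in bits              */

    printf("header describes %u elements\n", cd_values[2]);
    return 0;
}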
-static void H5Z_nbit_compress_one_byte(unsigned char *data, size_t data_offset, int k, int begin_i,
-int end_i, unsigned char *buffer, size_t *j, int *buf_len, parms_atomic p, int datatype_len)
+static void
+H5Z_nbit_compress_one_byte(unsigned char *data, size_t data_offset, unsigned k,
+ unsigned begin_i, unsigned end_i, unsigned char *buffer, size_t *j, size_t *buf_len,
+ const parms_atomic *p, size_t datatype_len)
{
- int dat_len; /* dat_len is the number of bits to be copied in each data byte */
+ size_t dat_len; /* dat_len is the number of bits to be copied in each data byte */
unsigned char val; /* value to be copied in each data byte */
/* initialize value and bits of unsigned char to be copied */
val = data[data_offset + k];
if(begin_i != end_i) { /* significant bits occupy >1 unsigned char */
if(k == begin_i)
- dat_len = 8 - (datatype_len - p.precision - p.offset) % 8;
+ dat_len = 8 - (datatype_len - p->precision - p->offset) % 8;
else if(k == end_i) {
- dat_len = 8 - p.offset % 8;
- val >>= 8 - dat_len;
+ dat_len = 8 - p->offset % 8;
+ val = (unsigned char)(val >> (8 - dat_len));
}
else
dat_len = 8;
} else { /* all significant bits in one unsigned char */
- val >>= p.offset % 8;
- dat_len = p.precision;
+ val = (unsigned char)(val >> (p->offset % 8));
+ dat_len = p->precision;
}
if(*buf_len > dat_len) {
- buffer[*j] |= (val & ~(~0 << dat_len)) << (*buf_len - dat_len);
+ buffer[*j] |= (unsigned char)((val & ~((unsigned)(~0) << dat_len)) << (*buf_len - dat_len));
*buf_len -= dat_len;
} else {
- buffer[*j] |= (val >> (dat_len - *buf_len)) & ~(~0 << *buf_len);
+ buffer[*j] |= (unsigned char)((unsigned)(val >> (dat_len - *buf_len)) & ~((unsigned)(~0) << *buf_len));
dat_len -= *buf_len;
H5Z_nbit_next_byte(j, buf_len);
- if(dat_len == 0) return;
+ if(dat_len == 0)
+ return;
- buffer[*j] = (val & ~(~0 << dat_len)) << (*buf_len - dat_len);
+ buffer[*j] = (unsigned char)((val & ~((unsigned)(~0) << dat_len)) << (*buf_len - dat_len));
*buf_len -= dat_len;
}
}
-static void H5Z_nbit_compress_one_nooptype(unsigned char *data, size_t data_offset,
- unsigned char *buffer, size_t *j, int *buf_len, unsigned size)
+static void
+H5Z_nbit_compress_one_nooptype(unsigned char *data, size_t data_offset,
+ unsigned char *buffer, size_t *j, size_t *buf_len, unsigned size)
{
unsigned i; /* index */
- unsigned dat_len; /* dat_len is the number of bits to be copied in each data byte */
+ size_t dat_len; /* dat_len is the number of bits to be copied in each data byte */
unsigned char val; /* value to be copied in each data byte */
for(i = 0; i < size; i++) {
@@ -1327,192 +1371,214 @@ static void H5Z_nbit_compress_one_nooptype(unsigned char *data, size_t data_offs
val = data[data_offset + i];
dat_len = sizeof(unsigned char) * 8;
- buffer[*j] |= (val >> (dat_len - *buf_len)) & ~(~0 << *buf_len);
+ buffer[*j] |= (unsigned char)((unsigned)(val >> (dat_len - *buf_len)) & ~((unsigned)(~0) << *buf_len));
dat_len -= *buf_len;
H5Z_nbit_next_byte(j, buf_len);
- if(dat_len == 0) continue;
+ if(dat_len == 0)
+ continue;
- buffer[*j] = (val & ~(~0 << dat_len)) << (*buf_len - dat_len);
+ buffer[*j] = (unsigned char)((val & ~((unsigned)(~0) << dat_len)) << (*buf_len - dat_len));
*buf_len -= dat_len;
}
}
-static void H5Z_nbit_compress_one_atomic(unsigned char *data, size_t data_offset,
- unsigned char *buffer, size_t *j, int *buf_len, parms_atomic p)
+static void
+H5Z_nbit_compress_one_atomic(unsigned char *data, size_t data_offset,
+ unsigned char *buffer, size_t *j, size_t *buf_len, const parms_atomic *p)
{
- /* begin_i: the index of byte having first significant bit
- end_i: the index of byte having last significant bit */
- int k, begin_i, end_i, datatype_len;
-
- datatype_len = p.size * 8;
-
- if(p.order == H5Z_NBIT_ORDER_LE) { /* little endian */
- /* calculate begin_i and end_i */
- if((p.precision + p.offset) % 8 != 0)
- begin_i = (p.precision + p.offset) / 8;
- else
- begin_i = (p.precision + p.offset) / 8 - 1;
- end_i = p.offset / 8;
-
- for(k = begin_i; k >= end_i; k--)
- H5Z_nbit_compress_one_byte(data, data_offset, k, begin_i, end_i,
- buffer, j, buf_len, p, datatype_len);
- }
-
- if(p.order == H5Z_NBIT_ORDER_BE) { /* big endian */
- /* calculate begin_i and end_i */
- begin_i = (datatype_len - p.precision - p.offset) / 8;
- if(p.offset % 8 != 0)
- end_i = (datatype_len - p.offset) / 8;
- else
- end_i = (datatype_len - p.offset) / 8 - 1;
-
- for(k = begin_i; k <= end_i; k++)
- H5Z_nbit_compress_one_byte(data, data_offset, k, begin_i, end_i,
- buffer, j, buf_len, p, datatype_len);
- }
+ /* begin_i: the index of byte having first significant bit
+ end_i: the index of byte having last significant bit */
+ int k;
+ unsigned begin_i, end_i;
+ size_t datatype_len;
+
+ datatype_len = p->size * 8;
+
+ if(p->order == H5Z_NBIT_ORDER_LE) { /* little endian */
+ /* calculate begin_i and end_i */
+ if((p->precision + p->offset) % 8 != 0)
+ begin_i = (p->precision + p->offset) / 8;
+ else
+ begin_i = (p->precision + p->offset) / 8 - 1;
+ end_i = p->offset / 8;
+
+ for(k = (int)begin_i; k >= (int)end_i; k--)
+ H5Z_nbit_compress_one_byte(data, data_offset, (unsigned)k, begin_i, end_i,
+ buffer, j, buf_len, p, datatype_len);
+ }
+ else { /* big endian */
+ /* Sanity check */
+ HDassert(p->order == H5Z_NBIT_ORDER_BE);
+
+ /* calculate begin_i and end_i */
+ begin_i = ((unsigned)datatype_len - p->precision - p->offset) / 8;
+ if(p->offset % 8 != 0)
+ end_i = ((unsigned)datatype_len - p->offset) / 8;
+ else
+ end_i = ((unsigned)datatype_len - p->offset) / 8 - 1;
+
+ for(k = (int)begin_i; k <= (int)end_i; k++)
+ H5Z_nbit_compress_one_byte(data, data_offset, (unsigned)k, begin_i, end_i,
+ buffer, j, buf_len, p, datatype_len);
+ }
}
-static void H5Z_nbit_compress_one_array(unsigned char *data, size_t data_offset, unsigned char *buffer,
- size_t *j, int *buf_len, const unsigned parms[])
+static void
+H5Z_nbit_compress_one_array(unsigned char *data, size_t data_offset,
+ unsigned char *buffer, size_t *j, size_t *buf_len, const unsigned parms[],
+ unsigned *parms_index)
{
unsigned i, total_size, base_class, base_size, n, begin_index;
parms_atomic p;
- total_size = parms[parms_index++];
- base_class = parms[parms_index++];
+ total_size = parms[(*parms_index)++];
+ base_class = parms[(*parms_index)++];
switch(base_class) {
case H5Z_NBIT_ATOMIC:
- p.size = parms[parms_index++];
- p.order = parms[parms_index++];
- p.precision = parms[parms_index++];
- p.offset = parms[parms_index++];
- n = total_size/p.size;
+ p.size = parms[(*parms_index)++];
+ p.order = parms[(*parms_index)++];
+ p.precision = parms[(*parms_index)++];
+ p.offset = parms[(*parms_index)++];
+ n = total_size / p.size;
for(i = 0; i < n; i++)
- H5Z_nbit_compress_one_atomic(data, data_offset + i*p.size,
- buffer, j, buf_len, p);
+ H5Z_nbit_compress_one_atomic(data, data_offset + i * p.size,
+ buffer, j, buf_len, &p);
break;
+
case H5Z_NBIT_ARRAY:
- base_size = parms[parms_index]; /* read in advance */
- n = total_size/base_size; /* number of base_type elements inside the array datatype */
- begin_index = parms_index;
+ base_size = parms[*parms_index]; /* read in advance */
+ n = total_size / base_size; /* number of base_type elements inside the array datatype */
+ begin_index = *parms_index;
for(i = 0; i < n; i++) {
- H5Z_nbit_compress_one_array(data, data_offset + i*base_size,
- buffer, j, buf_len, parms);
- parms_index = begin_index;
+ H5Z_nbit_compress_one_array(data, data_offset + i * base_size,
+ buffer, j, buf_len, parms, parms_index);
+ *parms_index = begin_index;
}
break;
+
case H5Z_NBIT_COMPOUND:
- base_size = parms[parms_index]; /* read in advance */
- n = total_size/base_size; /* number of base_type elements inside the array datatype */
- begin_index = parms_index;
+ base_size = parms[*parms_index]; /* read in advance */
+ n = total_size / base_size; /* number of base_type elements inside the array datatype */
+ begin_index = *parms_index;
for(i = 0; i < n; i++) {
- H5Z_nbit_compress_one_compound(data, data_offset + i*base_size,
- buffer, j, buf_len, parms);
- parms_index = begin_index;
+ H5Z_nbit_compress_one_compound(data, data_offset + i * base_size,
+ buffer, j, buf_len, parms, parms_index);
+ *parms_index = begin_index;
}
break;
+
case H5Z_NBIT_NOOPTYPE:
- parms_index++; /* skip size of no-op type */
+ (*parms_index)++; /* skip size of no-op type */
H5Z_nbit_compress_one_nooptype(data, data_offset, buffer, j, buf_len, total_size);
break;
+
default:
HDassert(0 && "This Should never be executed!");
} /* end switch */
}
-static void H5Z_nbit_compress_one_compound(unsigned char *data, size_t data_offset,
- unsigned char *buffer, size_t *j, int *buf_len, const unsigned parms[])
+static void
+H5Z_nbit_compress_one_compound(unsigned char *data, size_t data_offset,
+ unsigned char *buffer, size_t *j, size_t *buf_len, const unsigned parms[],
+ unsigned *parms_index)
{
unsigned i, nmembers, member_offset, member_class, size;
parms_atomic p;
- parms_index++; /* skip size of compound datatype */
- nmembers = parms[parms_index++];
+ (*parms_index)++; /* skip size of compound datatype */
+ nmembers = parms[(*parms_index)++];
for(i = 0; i < nmembers; i++) {
- member_offset = parms[parms_index++];
- member_class = parms[parms_index++];
+ member_offset = parms[(*parms_index)++];
+ member_class = parms[(*parms_index)++];
switch(member_class) {
case H5Z_NBIT_ATOMIC:
- p.size = parms[parms_index++];
- p.order = parms[parms_index++];
- p.precision = parms[parms_index++];
- p.offset = parms[parms_index++];
+ p.size = parms[(*parms_index)++];
+ p.order = parms[(*parms_index)++];
+ p.precision = parms[(*parms_index)++];
+ p.offset = parms[(*parms_index)++];
H5Z_nbit_compress_one_atomic(data, data_offset + member_offset,
- buffer, j, buf_len, p);
+ buffer, j, buf_len, &p);
break;
+
case H5Z_NBIT_ARRAY:
H5Z_nbit_compress_one_array(data, data_offset + member_offset,
- buffer, j, buf_len, parms);
+ buffer, j, buf_len, parms, parms_index);
break;
+
case H5Z_NBIT_COMPOUND:
H5Z_nbit_compress_one_compound(data, data_offset+member_offset,
- buffer, j, buf_len, parms);
+ buffer, j, buf_len, parms, parms_index);
break;
+
case H5Z_NBIT_NOOPTYPE:
- size = parms[parms_index++];
+ size = parms[(*parms_index)++];
H5Z_nbit_compress_one_nooptype(data, data_offset+member_offset,
buffer, j, buf_len, size);
break;
+
default:
HDassert(0 && "This Should never be executed!");
} /* end switch */
}
}
-static void H5Z_nbit_compress(unsigned char *data, unsigned d_nelmts, unsigned char *buffer,
- size_t *buffer_size, const unsigned parms[])
+static void
+H5Z_nbit_compress(unsigned char *data, unsigned d_nelmts, unsigned char *buffer,
+ size_t *buffer_size, const unsigned parms[])
{
- /* i: index of data, new_size: index of buffer,
- buf_len: number of bits to be filled in current byte */
- size_t i, size;
- size_t new_size = 0;
- int buf_len;
- parms_atomic p;
+ /* i: index of data, new_size: index of buffer,
+ buf_len: number of bits to be filled in current byte */
+ unsigned i;
+ size_t size;
+ size_t new_size = 0;
+ size_t buf_len;
+ parms_atomic p;
+ unsigned parms_index; /* index in array parms used by compression/decompression functions */
+
+ /* must initialize buffer to be zeros */
+ HDmemset(buffer, 0, *buffer_size);
+
+ /* initialization before the loop */
+ buf_len = sizeof(unsigned char) * 8;
+
+ switch(parms[3]) {
+ case H5Z_NBIT_ATOMIC:
+ p.size = parms[4];
+ p.order = parms[5];
+ p.precision = parms[6];
+ p.offset = parms[7];
+
+ for(i = 0; i < d_nelmts; i++)
+ H5Z_nbit_compress_one_atomic(data, i * p.size, buffer, &new_size, &buf_len, &p);
+ break;
- /* must initialize buffer to be zeros */
- HDmemset(buffer, 0, *buffer_size);
+ case H5Z_NBIT_ARRAY:
+ size = parms[4];
+ parms_index = 4;
+ for(i = 0; i < d_nelmts; i++) {
+ H5Z_nbit_compress_one_array(data, i * size, buffer, &new_size, &buf_len, parms, &parms_index);
+ parms_index = 4;
+ }
+ break;
- /* initialization before the loop */
- buf_len = sizeof(unsigned char) * 8;
+ case H5Z_NBIT_COMPOUND:
+ size = parms[4];
+ parms_index = 4;
+ for(i = 0; i < d_nelmts; i++) {
+ H5Z_nbit_compress_one_compound(data, i * size, buffer, &new_size, &buf_len, parms, &parms_index);
+ parms_index = 4;
+ }
+ break;
- switch(parms[3]) {
- case H5Z_NBIT_ATOMIC:
- /* set the index before goto function call */
- p.size = parms[4];
- p.order = parms[5];
- p.precision = parms[6];
- p.offset = parms[7];
-
- for(i = 0; i < d_nelmts; i++) {
- H5Z_nbit_compress_one_atomic(data, i*p.size, buffer, &new_size, &buf_len, p);
- }
- break;
- case H5Z_NBIT_ARRAY:
- size = parms[4];
- parms_index = 4;
- for(i = 0; i < d_nelmts; i++) {
- H5Z_nbit_compress_one_array(data, i*size, buffer, &new_size, &buf_len, parms);
- parms_index = 4;
- }
- break;
- case H5Z_NBIT_COMPOUND:
- size = parms[4];
- parms_index = 4;
- for(i = 0; i < d_nelmts; i++) {
- H5Z_nbit_compress_one_compound(data, i*size, buffer, &new_size, &buf_len, parms);
- parms_index = 4;
- }
- break;
- default:
- HDassert(0 && "This Should never be executed!");
- } /* end switch */
+ default:
+ HDassert(0 && "This Should never be executed!");
+ } /* end switch */
- /* Update the size to the new value after compression. If there are any bits hanging over in
- * the last byte, increment the value by 1. */
- *buffer_size = new_size + 1;
+ /* Update the size to the new value after compression. If there are any bits hanging over in
+ * the last byte, increment the value by 1. */
+ *buffer_size = new_size + 1;
}
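
Both compress paths OR bits into their destination, so the output buffer must start zeroed; the patch simply replaces the hand-written zeroing loop with HDmemset (HDF5's thin wrapper over memset). A small demonstration of why the pre-zeroing matters:

#include <stdio.h>
#include <string.h>

/* The compress path ORs bit fields into the output buffer, so the
 * buffer has to start at all-zeros for the ORs to compose correctly. */
int
main(void)
{
    unsigned char buffer[4];

    memset(buffer, 0, sizeof(buffer));      /* required before |= below */
    buffer[0] |= 0xA0;                      /* first field, high bits   */
    buffer[0] |= 0x05;                      /* second field, low bits   */

    printf("0x%02X\n", buffer[0]);          /* 0xA5 */
    return 0;
}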
diff --git a/src/H5Zscaleoffset.c b/src/H5Zscaleoffset.c
index 7c9dbf9..1cca9b1 100644
--- a/src/H5Zscaleoffset.c
+++ b/src/H5Zscaleoffset.c
@@ -28,7 +28,7 @@
/* Struct of parameters needed for compressing/decompressing one atomic datatype */
typedef struct {
- size_t size; /* datatype size */
+ unsigned size; /* datatype size */
uint32_t minbits; /* minimum bits to compress one value of such datatype */
unsigned mem_order; /* current memory endianness order */
} parms_atomic;
@@ -38,7 +38,6 @@ enum H5Z_scaleoffset_t {t_bad=0, t_uchar=1, t_ushort, t_uint, t_ulong, t_ulong_l
t_float, t_double};
/* Local function prototypes */
-static double H5Z_scaleoffset_rnd(double val);
static htri_t H5Z_can_apply_scaleoffset(hid_t dcpl_id, hid_t type_id, hid_t space_id);
static enum H5Z_scaleoffset_t H5Z_scaleoffset_get_type(unsigned dtype_class,
unsigned dtype_size, unsigned dtype_sign);
@@ -48,7 +47,7 @@ static herr_t H5Z_scaleoffset_set_parms_fillval(H5P_genplist_t *dcpl_plist,
static herr_t H5Z_set_local_scaleoffset(hid_t dcpl_id, hid_t type_id, hid_t space_id);
static size_t H5Z_filter_scaleoffset(unsigned flags, size_t cd_nelmts,
const unsigned cd_values[], size_t nbytes, size_t *buf_size, void **buf);
-static void H5Z_scaleoffset_convert(void *buf, unsigned d_nelmts, size_t dtype_size);
+static void H5Z_scaleoffset_convert(void *buf, unsigned d_nelmts, unsigned dtype_size);
static H5_ATTR_CONST unsigned H5Z_scaleoffset_log2(unsigned long long num);
static void H5Z_scaleoffset_precompress_i(void *data, unsigned d_nelmts,
enum H5Z_scaleoffset_t type, unsigned filavail, const unsigned cd_values[],
@@ -389,25 +388,28 @@ H5Z_class2_t H5Z_SCALEOFFSET[1] = {{
}
/* Check and handle special situation for floating-point type */
-#define H5Z_scaleoffset_check_3(i, type, max, min, minbits, D_val) \
+#define H5Z_scaleoffset_check_3(i, type, pow_fun, round_fun, max, min, minbits, D_val) \
{ \
- if(sizeof(type)==sizeof(int)) { \
- if(H5Z_scaleoffset_rnd(max*HDpow(10.0f, D_val) - min*HDpow(10.0f, D_val)) \
- > HDpow(2.0f, (double)(sizeof(int)*8 - 1))) { \
- *minbits = sizeof(int)*8; goto done; \
- } \
- } else if(sizeof(type)==sizeof(long)) { \
- if(H5Z_scaleoffset_rnd(max*HDpow(10.0f, D_val) - min*HDpow(10.0f, D_val)) \
- > HDpow(2.0f, (double)(sizeof(long)*8 - 1))) { \
- *minbits = sizeof(long)*8; goto done; \
- } \
- } else if(sizeof(type)==sizeof(long long)) { \
- if(H5Z_scaleoffset_rnd(max*HDpow(10.0f, D_val) - min*HDpow(10.0f, D_val)) \
- > HDpow(2.0f, (double)(sizeof(long long)*8 - 1))) { \
- *minbits = sizeof(long long)*8; goto done; \
- } \
- } else \
- HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "cannot find matched integer dataype")\
+ if(sizeof(type) == sizeof(int)) { \
+ if(round_fun(max * pow_fun(10.0f, (type)D_val) - min * pow_fun(10.0f, (type)D_val)) \
+ > pow_fun(2.0f, (type)(sizeof(int) * 8 - 1))) { \
+ *minbits = sizeof(int) * 8; \
+ goto done; \
+ } \
+ } else if(sizeof(type) == sizeof(long)) { \
+ if(round_fun(max * pow_fun(10.0f, (type)D_val) - min * pow_fun(10.0f, (type)D_val)) \
+ > pow_fun(2.0f, (type)(sizeof(long) * 8 - 1))) { \
+ *minbits = sizeof(long) * 8; \
+ goto done; \
+ } \
+ } else if(sizeof(type) == sizeof(long long)) { \
+ if(round_fun(max * pow_fun(10.0f, (type)D_val) - min * pow_fun(10.0f, (type)D_val)) \
+ > pow_fun(2.0f, (type)(sizeof(long long) * 8 - 1))) { \
+ *minbits = sizeof(long long) * 8; \
+ goto done; \
+ } \
+ } else \
+ HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "cannot find matched integer dataype") \
}
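
The scaleoffset macros above now receive the math routines (pow_fun, round_fun, and so on) as macro arguments, presumably so a float instantiation can use the single-precision functions while a double instantiation uses the double-precision ones. A reduced sketch of that parameterization (macro name and shape are illustrative only):

#include <math.h>
#include <stdio.h>

/* Sketch of the "pass the math function into the macro" pattern, so a
 * float expansion can use powf/lroundf and a double expansion pow/lround. */
#define SCALE_TO_INT(type, pow_fun, lround_fun, val, d_val) \
    ((long)lround_fun((val) * pow_fun(10.0f, (type)(d_val))))

int
main(void)
{
    float  f = 3.14159f;
    double d = 3.14159;

    printf("%ld\n", SCALE_TO_INT(float,  powf, lroundf, f, 2));  /* 314 */
    printf("%ld\n", SCALE_TO_INT(double, pow,  lround,  d, 2));  /* 314 */
    return 0;
}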
/* Precompress for unsigned integer type */
@@ -477,53 +479,47 @@ H5Z_class2_t H5Z_SCALEOFFSET[1] = {{
}
/* Modify values of data in precompression if fill value defined for floating-point type */
-#define H5Z_scaleoffset_modify_1(i, type, buf, d_nelmts, filval, minbits, min, D_val) \
+#define H5Z_scaleoffset_modify_1(i, type, pow_fun, abs_fun, lround_fun, llround_fun, buf, d_nelmts, filval, minbits, min, D_val) \
{ \
- if(sizeof(type)==sizeof(int)) \
+ if(sizeof(type) == sizeof(int)) \
for(i = 0; i < d_nelmts; i++) { \
- if(HDfabs(buf[i] - filval) < HDpow(10.0f, -D_val)) \
+ if(abs_fun(buf[i] - filval) < pow_fun(10.0f, (type)-D_val)) \
*(int *)&buf[i] = (int)(((unsigned int)1 << *minbits) - 1); \
else \
- *(int *)&buf[i] = H5Z_scaleoffset_rnd( \
- buf[i]*HDpow(10.0f, D_val) - min*HDpow(10.0f, D_val)); \
+ *(int *)&buf[i] = (int)lround_fun(buf[i] * pow_fun(10.0f, (type)D_val) - min * pow_fun(10.0f, (type)D_val)); \
} \
- else if(sizeof(type)==sizeof(long)) \
+ else if(sizeof(type) == sizeof(long)) \
for(i = 0; i < d_nelmts; i++) { \
- if(HDfabs(buf[i] - filval) < HDpow(10.0f, -D_val)) \
+ if(abs_fun(buf[i] - filval) < pow_fun(10.0f, (type)-D_val)) \
*(long *)&buf[i] = (long)(((unsigned long)1 << *minbits) - 1); \
else \
- *(long *)&buf[i] = H5Z_scaleoffset_rnd( \
- buf[i]*HDpow(10.0f, D_val) - min*HDpow(10.0f, D_val)); \
+ *(long *)&buf[i] = lround_fun(buf[i] * pow_fun(10.0f, (type)D_val) - min * pow_fun(10.0f, (type)D_val)); \
} \
- else if(sizeof(type)==sizeof(long long)) \
+ else if(sizeof(type) == sizeof(long long)) \
for(i = 0; i < d_nelmts; i++) { \
- if(HDfabs(buf[i] - filval) < HDpow(10.0f, -D_val)) \
+ if(abs_fun(buf[i] - filval) < pow_fun(10.0f, (type)-D_val)) \
*(long long *)&buf[i] = (long long)(((unsigned long long)1 << *minbits) - 1); \
else \
- *(long long *)&buf[i] = H5Z_scaleoffset_rnd( \
- buf[i]*HDpow(10.0f, D_val) - min*HDpow(10.0f, D_val)); \
+ *(long long *)&buf[i] = llround_fun(buf[i] * pow_fun(10.0f, (type)D_val) - min * pow_fun(10.0f, (type)D_val)); \
} \
else \
- HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "cannot find matched integer dataype")\
+ HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "cannot find matched integer dataype") \
}
/* Modify values of data in precompression if fill value undefined for floating-point type */
-#define H5Z_scaleoffset_modify_2(i, type, buf, d_nelmts, min, D_val) \
+#define H5Z_scaleoffset_modify_2(i, type, pow_fun, lround_fun, llround_fun, buf, d_nelmts, min, D_val) \
{ \
- if(sizeof(type)==sizeof(int)) \
+ if(sizeof(type) == sizeof(int)) \
for(i = 0; i < d_nelmts; i++) \
- *(int *)&buf[i] = H5Z_scaleoffset_rnd( \
- buf[i]*HDpow(10.0f, D_val) - min*HDpow(10.0f, D_val)); \
- else if(sizeof(type)==sizeof(long)) \
+ *(int *)&buf[i] = (int)lround_fun(buf[i] * pow_fun(10.0f, (type)D_val) - min * pow_fun(10.0f, (type)D_val)); \
+ else if(sizeof(type) == sizeof(long)) \
for(i = 0; i < d_nelmts; i++) \
- *(long *)&buf[i] = H5Z_scaleoffset_rnd( \
- buf[i]*HDpow(10.0f, D_val) - min*HDpow(10.0f, D_val)); \
- else if(sizeof(type)==sizeof(long long)) \
+ *(long *)&buf[i] = lround_fun(buf[i] * pow_fun(10.0f, (type)D_val) - min * pow_fun(10.0f, (type)D_val)); \
+ else if(sizeof(type) == sizeof(long long)) \
for(i = 0; i < d_nelmts; i++) \
- *(long long *)&buf[i] = H5Z_scaleoffset_rnd( \
- buf[i]*HDpow(10.0f, D_val) - min*HDpow(10.0f, D_val)); \
+ *(long long *)&buf[i] = llround_fun(buf[i] * pow_fun(10.0f, (type)D_val) - min * pow_fun(10.0f, (type)D_val)); \
else \
- HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "cannot find matched integer dataype")\
+ HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "cannot find matched integer dataype") \
}
/* Save the minimum value for floating-point type */
@@ -546,7 +542,7 @@ H5Z_class2_t H5Z_SCALEOFFSET[1] = {{
}
/* Precompress for floating-point type using variable-minimum-bits method */
-#define H5Z_scaleoffset_precompress_3(type, data, d_nelmts, filavail, cd_values, \
+#define H5Z_scaleoffset_precompress_3(type, pow_fun, abs_fun, round_fun, lround_fun, llround_fun, data, d_nelmts, filavail, cd_values, \
minbits, minval, D_val) \
{ \
type *buf = (type *)data, min = 0, max = 0, filval = 0; \
@@ -557,18 +553,18 @@ H5Z_class2_t H5Z_SCALEOFFSET[1] = {{
if(filavail == H5Z_SCALEOFFSET_FILL_DEFINED) { /* fill value defined */ \
H5Z_scaleoffset_get_filval_2(type, cd_values, filval) \
H5Z_scaleoffset_max_min_3(i, d_nelmts, buf, filval, max, min, D_val) \
- H5Z_scaleoffset_check_3(i, type, max, min, minbits, D_val) \
- span = H5Z_scaleoffset_rnd(max * HDpow(10.0f, D_val) - min * HDpow(10.0f, D_val)) + 1; \
- *minbits = H5Z_scaleoffset_log2((unsigned long long)(span + 1)); \
+ H5Z_scaleoffset_check_3(i, type, pow_fun, round_fun, max, min, minbits, D_val) \
+ span = (unsigned long long)(llround_fun(max * pow_fun(10.0f, (type)D_val) - min * pow_fun(10.0f, (type)D_val)) + 1); \
+ *minbits = H5Z_scaleoffset_log2(span + 1); \
if(*minbits != sizeof(type) * 8) /* change values if minbits != full precision */ \
- H5Z_scaleoffset_modify_1(i, type, buf, d_nelmts, filval, minbits, min, D_val) \
+ H5Z_scaleoffset_modify_1(i, type, pow_fun, abs_fun, lround_fun, llround_fun, buf, d_nelmts, filval, minbits, min, D_val) \
} else { /* fill value undefined */ \
H5Z_scaleoffset_max_min_2(i, d_nelmts, buf, max, min) \
- H5Z_scaleoffset_check_3(i, type, max, min, minbits, D_val) \
- span = H5Z_scaleoffset_rnd(max * HDpow(10.0f, D_val) - min * HDpow(10.0f, D_val)) + 1; \
- *minbits = H5Z_scaleoffset_log2((unsigned long long)span); \
+ H5Z_scaleoffset_check_3(i, type, pow_fun, round_fun, max, min, minbits, D_val) \
+ span = (unsigned long long)(llround_fun(max * pow_fun(10.0f, (type)D_val) - min * pow_fun(10.0f, (type)D_val)) + 1); \
+ *minbits = H5Z_scaleoffset_log2(span); \
if(*minbits != sizeof(type) * 8) /* change values if minbits != full precision */ \
- H5Z_scaleoffset_modify_2(i, type, buf, d_nelmts, min, D_val) \
+ H5Z_scaleoffset_modify_2(i, type, pow_fun, lround_fun, llround_fun, buf, d_nelmts, min, D_val) \
} \
H5Z_scaleoffset_save_min(i, type, minval, min) \
}
@@ -621,42 +617,42 @@ H5Z_class2_t H5Z_SCALEOFFSET[1] = {{
}
/* Modify values of data in postdecompression if fill value defined for floating-point type */
-#define H5Z_scaleoffset_modify_3(i, type, buf, d_nelmts, filval, minbits, min, D_val) \
+#define H5Z_scaleoffset_modify_3(i, type, pow_fun, buf, d_nelmts, filval, minbits, min, D_val) \
{ \
- if(sizeof(type)==sizeof(int)) \
+ if(sizeof(type) == sizeof(int)) \
for(i = 0; i < d_nelmts; i++) \
buf[i] = (type)((*(int *)&buf[i] == (int)(((unsigned int)1 << minbits) - 1)) ? \
- filval : (double)(*(int *)&buf[i]) / HDpow(10.0f, D_val) + min); \
- else if(sizeof(type)==sizeof(long)) \
+ filval : (type)(*(int *)&buf[i]) / pow_fun(10.0f, (type)D_val) + min); \
+ else if(sizeof(type) == sizeof(long)) \
for(i = 0; i < d_nelmts; i++) \
buf[i] = (type)((*(long *)&buf[i] == (long)(((unsigned long)1 << minbits) - 1)) ? \
- filval : (double)(*(long *)&buf[i]) / HDpow(10.0f, D_val) + min); \
- else if(sizeof(type)==sizeof(long long)) \
+ filval : (type)(*(long *)&buf[i]) / pow_fun(10.0f, (type)D_val) + min); \
+ else if(sizeof(type) == sizeof(long long)) \
for(i = 0; i < d_nelmts; i++) \
buf[i] = (type)((*(long long *)&buf[i] == (long long)(((unsigned long long)1 << minbits) - 1)) ? \
- filval : (double)(*(long long *)&buf[i]) / HDpow(10.0f, D_val) + min); \
+ filval : (type)(*(long long *)&buf[i]) / pow_fun(10.0f, (type)D_val) + min); \
else \
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "cannot find matched integer dataype") \
}
/* Modify values of data in postdecompression if fill value undefined for floating-point type */
-#define H5Z_scaleoffset_modify_4(i, type, buf, d_nelmts, min, D_val) \
+#define H5Z_scaleoffset_modify_4(i, type, pow_fun, buf, d_nelmts, min, D_val) \
{ \
if(sizeof(type)==sizeof(int)) \
for(i = 0; i < d_nelmts; i++) \
- buf[i] = (type)((double)(*(int *)&buf[i]) / HDpow(10.0f, D_val) + min); \
+ buf[i] = ((type)(*(int *)&buf[i]) / pow_fun(10.0f, (type)D_val) + min); \
else if(sizeof(type)==sizeof(long)) \
for(i = 0; i < d_nelmts; i++) \
- buf[i] = (type)((double)(*(long *)&buf[i]) / HDpow(10.0f, D_val) + min); \
+ buf[i] = ((type)(*(long *)&buf[i]) / pow_fun(10.0f, (type)D_val) + min); \
else if(sizeof(type)==sizeof(long long)) \
for(i = 0; i < d_nelmts; i++) \
- buf[i] = (type)((double)(*(long long *)&buf[i]) / HDpow(10.0f, D_val) + min); \
+ buf[i] = ((type)(*(long long *)&buf[i]) / pow_fun(10.0f, (type)D_val) + min); \
else \
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "cannot find matched integer dataype") \
}
/* Postdecompress for floating-point type using variable-minimum-bits method */
-#define H5Z_scaleoffset_postdecompress_3(type, data, d_nelmts, filavail, cd_values, \
+#define H5Z_scaleoffset_postdecompress_3(type, pow_fun, data, d_nelmts, filavail, cd_values, \
minbits, minval, D_val) \
{ \
type *buf = (type *)data, filval = 0, min = 0; \
@@ -666,9 +662,9 @@ H5Z_class2_t H5Z_SCALEOFFSET[1] = {{
\
if(filavail == H5Z_SCALEOFFSET_FILL_DEFINED) { /* fill value defined */ \
H5Z_scaleoffset_get_filval_2(type, cd_values, filval) \
- H5Z_scaleoffset_modify_3(i, type, buf, d_nelmts, filval, minbits, min, D_val) \
+ H5Z_scaleoffset_modify_3(i, type, pow_fun, buf, d_nelmts, filval, minbits, min, D_val) \
} else /* fill value undefined */ \
- H5Z_scaleoffset_modify_4(i, type, buf, d_nelmts, min, D_val) \
+ H5Z_scaleoffset_modify_4(i, type, pow_fun, buf, d_nelmts, min, D_val) \
}
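
The postdecompress macros are the mirror image: divide the stored integer code by 10^D_val and add the minimum back, now through the type-matched pow_fun so float data stays in float instead of detouring through double. A hedged decode sketch for the float case (illustrative names only):

    #include <math.h>
    #include <stddef.h>

    /* Illustrative decode step for float data: v = code / 10^D + min,
     * evaluated entirely in float via powf(). */
    static void
    scaleoffset_decode_sketch(const unsigned long long *in, float *out,
                              size_t n, unsigned D, float min)
    {
        size_t i;

        for (i = 0; i < n; i++)
            out[i] = (float)in[i] / powf(10.0f, (float)D) + min;
    }
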
@@ -756,7 +752,9 @@ H5Z_scaleoffset_get_type(unsigned dtype_class, unsigned dtype_size, unsigned dty
else if(dtype_size == sizeof(unsigned short)) type = t_ushort;
else if(dtype_size == sizeof(unsigned int)) type = t_uint;
else if(dtype_size == sizeof(unsigned long)) type = t_ulong;
+#if H5_SIZEOF_LONG != H5_SIZEOF_LONG_LONG
else if(dtype_size == sizeof(unsigned long long)) type = t_ulong_long;
+#endif /* H5_SIZEOF_LONG != H5_SIZEOF_LONG_LONG */
else
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, t_bad, "cannot find matched memory dataype")
}
@@ -766,7 +764,9 @@ H5Z_scaleoffset_get_type(unsigned dtype_class, unsigned dtype_size, unsigned dty
else if(dtype_size == sizeof(short)) type = t_short;
else if(dtype_size == sizeof(int)) type = t_int;
else if(dtype_size == sizeof(long)) type = t_long;
+#if H5_SIZEOF_LONG != H5_SIZEOF_LONG_LONG
else if(dtype_size == sizeof(long long)) type = t_long_long;
+#endif /* H5_SIZEOF_LONG != H5_SIZEOF_LONG_LONG */
else
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, t_bad, "cannot find matched memory dataype")
}
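
The new H5_SIZEOF_LONG != H5_SIZEOF_LONG_LONG guards drop the long long branch on platforms where long already has the same width: the preceding sizeof(long) test would always match first, leaving the second branch unreachable. The same dispatch pattern, sketched with <limits.h> constants standing in for HDF5's configure-detected size macros:

    #include <limits.h>
    #include <stddef.h>

    enum int_kind { KIND_INT, KIND_LONG, KIND_LLONG, KIND_BAD };

    /* Pick the standard integer type whose size matches dtype_size.  When
     * long and long long have the same range, the long long branch could
     * never be reached, so it is compiled out instead of left dead. */
    static enum int_kind
    pick_int_kind(size_t dtype_size)
    {
        if (dtype_size == sizeof(int))
            return KIND_INT;
        if (dtype_size == sizeof(long))
            return KIND_LONG;
    #if LONG_MAX != LLONG_MAX
        if (dtype_size == sizeof(long long))
            return KIND_LLONG;
    #endif
        return KIND_BAD;
    }
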
@@ -935,7 +935,8 @@ H5Z_set_local_scaleoffset(hid_t dcpl_id, hid_t type_id, hid_t space_id)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "bad datatype size")
/* Set "local" parameter for datatype size */
- cd_values[H5Z_SCALEOFFSET_PARM_SIZE] = dtype_size;
+ H5_CHECK_OVERFLOW(dtype_size, size_t, unsigned);
+ cd_values[H5Z_SCALEOFFSET_PARM_SIZE] = (unsigned)dtype_size;
if(dtype_class == H5T_INTEGER) {
/* Get datatype's sign */
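
H5_CHECK_OVERFLOW documents the narrowing from size_t to the unsigned cd_values slot and, in debug builds, verifies that the value actually fits before the explicit cast. A minimal stand-in for that assert-then-cast pattern (not the HDF5 macro itself):

    #include <assert.h>
    #include <limits.h>
    #include <stddef.h>

    /* Checked narrowing: the cast is intentional and guarded in debug builds. */
    static unsigned
    narrow_size_to_unsigned(size_t v)
    {
        assert(v <= UINT_MAX);  /* would fire if a datatype size ever overflowed */
        return (unsigned)v;
    }
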
@@ -1196,7 +1197,7 @@ H5Z_filter_scaleoffset(unsigned flags, size_t cd_nelmts, const unsigned cd_value
}
/* before postprocess, get memory type */
- if((type = H5Z_scaleoffset_get_type(dtype_class, (unsigned)p.size, dtype_sign)) == 0)
+ if((type = H5Z_scaleoffset_get_type(dtype_class, p.size, dtype_sign)) == 0)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, 0, "cannot use C integer datatype for cast")
/* postprocess after decompression */
@@ -1224,7 +1225,7 @@ H5Z_filter_scaleoffset(unsigned flags, size_t cd_nelmts, const unsigned cd_value
H5Z_scaleoffset_convert(*buf, d_nelmts, p.size);
/* before preprocess, get memory type */
- if((type = H5Z_scaleoffset_get_type(dtype_class, (unsigned)p.size, dtype_sign))==0)
+ if((type = H5Z_scaleoffset_get_type(dtype_class, p.size, dtype_sign))==0)
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, 0, "cannot use C integer datatype for cast")
/* preprocess before compression */
@@ -1316,10 +1317,10 @@ done:
* or from big-endian to little-endian 2/21/2005
*/
static void
-H5Z_scaleoffset_convert(void *buf, unsigned d_nelmts, size_t dtype_size)
+H5Z_scaleoffset_convert(void *buf, unsigned d_nelmts, unsigned dtype_size)
{
if(dtype_size > 1) {
- unsigned i, j;
+ size_t i, j;
unsigned char *buffer, temp;
buffer = (unsigned char *)buf;
@@ -1333,32 +1334,6 @@ H5Z_scaleoffset_convert(void *buf, unsigned d_nelmts, size_t dtype_size)
} /* end if */
} /* end H5Z_scaleoffset_convert() */
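
H5Z_scaleoffset_convert reverses the byte order of every element in place when the stored and in-memory byte orders differ; the loop counters become size_t, presumably so the per-byte offset arithmetic cannot overflow for large buffers. An equivalent stand-alone sketch:

    #include <stddef.h>

    /* In-place byte swap of each dtype_size-wide element. */
    static void
    byteswap_elements(void *buf, size_t nelmts, unsigned dtype_size)
    {
        unsigned char *b = (unsigned char *)buf;
        size_t         i;
        unsigned       j;

        if (dtype_size <= 1)
            return;
        for (i = 0; i < nelmts; i++, b += dtype_size)
            for (j = 0; j < dtype_size / 2; j++) {
                unsigned char tmp = b[j];

                b[j] = b[dtype_size - 1 - j];
                b[dtype_size - 1 - j] = tmp;
            }
    }
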
-/* Round a floating-point value to the nearest integer value 4/19/05 */
-/* rounding to the bigger absolute value if val is in the middle,
- 0.5 -> 1, -0.5 ->-1
-5/9/05, KY */
-static double
-H5Z_scaleoffset_rnd(double val)
-{
- double u_val, l_val;
-
- u_val = HDceil(val);
- l_val = HDfloor(val);
-
- if(val > 0) {
- if((u_val - val) <= (val - l_val))
- return u_val;
- else
- return l_val;
- } /* end if */
- else {
- if((val - l_val) <= (u_val - val))
- return l_val;
- else
- return u_val;
- }
-} /* H5Z_scaleoffset_rnd() */
-
/* return ceiling of floating-point log2 function
* receive unsigned integer as argument 3/10/2005
*/
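
The hand-rolled H5Z_scaleoffset_rnd removed above rounded halfway cases away from zero (0.5 to 1, -0.5 to -1). C99's round(), lround() and llround() specify exactly that halfway behaviour, which is what allows the filter to switch to the HDround/HDlround/HDllround wrappers used below. A quick demonstration:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        /* halfway cases round away from zero, matching the removed helper */
        printf("%.0f %.0f\n", round(0.5), round(-0.5));   /* 1 -1 */
        printf("%ld %lld\n", lround(2.5), llround(-2.5)); /* 3 -3 */
        return 0;
    }
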
@@ -1507,19 +1482,19 @@ H5Z_scaleoffset_precompress_fd(void *data, unsigned d_nelmts, enum H5Z_scaleoffs
unsigned filavail, const unsigned cd_values[], uint32_t *minbits,
unsigned long long *minval, double D_val)
{
- herr_t ret_value=SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_NOAPI_NOINIT
- if(type == t_float)
- H5Z_scaleoffset_precompress_3(float, data, d_nelmts,
- filavail, cd_values, minbits, minval, D_val)
- else if(type == t_double)
- H5Z_scaleoffset_precompress_3(double, data, d_nelmts,
- filavail, cd_values, minbits, minval, D_val)
+ if(type == t_float)
+ H5Z_scaleoffset_precompress_3(float, HDpowf, HDfabsf, HDroundf, HDlroundf, HDllroundf, data, d_nelmts,
+ filavail, cd_values, minbits, minval, D_val)
+ else if(type == t_double)
+ H5Z_scaleoffset_precompress_3(double, HDpow, HDfabs, HDround, HDlround, HDllround, data, d_nelmts,
+ filavail, cd_values, minbits, minval, D_val)
done:
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(ret_value)
}
/* postdecompress for floating-point type, variable-minimum-bits method
@@ -1535,10 +1510,10 @@ H5Z_scaleoffset_postdecompress_fd(void *data, unsigned d_nelmts, enum H5Z_scaleo
FUNC_ENTER_NOAPI_NOINIT
if(type == t_float)
- H5Z_scaleoffset_postdecompress_3(float, data, d_nelmts, filavail,
+ H5Z_scaleoffset_postdecompress_3(float, HDpowf, data, d_nelmts, filavail,
cd_values, minbits, sminval, D_val)
else if(type == t_double)
- H5Z_scaleoffset_postdecompress_3(double, data, d_nelmts, filavail,
+ H5Z_scaleoffset_postdecompress_3(double, HDpow, data, d_nelmts, filavail,
cd_values, minbits, sminval, D_val)
done:
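
Passing the element type together with its math functions (HDpowf/HDpow, HDlroundf/HDlround, and so on) into one macro body is what lets a single precompress or postdecompress macro serve both float and double without promoting float data to double. A toy version of that parameterization technique, with illustrative names:

    #include <math.h>
    #include <stdio.h>

    /* One macro body, instantiated per type by passing the matching functions. */
    #define SCALE_ONE(type, pow_fun, lround_fun, val, D) \
        lround_fun((val) * pow_fun((type)10, (type)(D)))

    int main(void)
    {
        printf("%ld\n", SCALE_ONE(float,  powf, lroundf, 1.23f, 2)); /* 123 */
        printf("%ld\n", SCALE_ONE(double, pow,  lround,  1.23,  2)); /* 123 */
        return 0;
    }
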
@@ -1568,18 +1543,18 @@ H5Z_scaleoffset_decompress_one_byte(unsigned char *data, size_t data_offset,
dat_len = 8;
if(*buf_len > dat_len) {
- data[data_offset + k] = (unsigned char)((val >> (*buf_len - dat_len)) & ~(~0 << dat_len));
+ data[data_offset + k] = (unsigned char)((unsigned)(val >> (*buf_len - dat_len)) & (unsigned)(~((unsigned)~0 << dat_len)));
*buf_len -= dat_len;
} /* end if */
else {
- data[data_offset + k] = (unsigned char)((val & ~(~0 << *buf_len)) << (dat_len - *buf_len));
+ data[data_offset + k] = (unsigned char)((val & ~((unsigned)(~0) << *buf_len)) << (dat_len - *buf_len));
dat_len -= *buf_len;
H5Z_scaleoffset_next_byte(j, buf_len);
if(dat_len == 0)
return;
val = buffer[*j];
- data[data_offset + k] |= (unsigned char)((val >> (*buf_len - dat_len)) & ~(~0 << dat_len));
+ data[data_offset + k] |= (unsigned char)((unsigned)(val >> (*buf_len - dat_len)) & ~((unsigned)(~0) << dat_len));
*buf_len -= dat_len;
} /* end else */
}
@@ -1625,7 +1600,7 @@ H5Z_scaleoffset_decompress(unsigned char *data, unsigned d_nelmts,
unsigned buf_len;
/* must initialize to zeros */
- for(i = 0; i < d_nelmts*p.size; i++)
+ for(i = 0; i < d_nelmts * p.size; i++)
data[i] = 0;
/* initialization before the loop */
@@ -1653,16 +1628,16 @@ H5Z_scaleoffset_compress_one_byte(unsigned char *data, size_t data_offset,
dat_len = 8;
if(*buf_len > dat_len) {
- buffer[*j] |= (unsigned char)((val & ~(~0 << dat_len)) << (*buf_len - dat_len));
+ buffer[*j] |= (unsigned char)((val & ~((unsigned)(~0) << dat_len)) << (*buf_len - dat_len));
*buf_len -= dat_len;
} else {
- buffer[*j] |= (unsigned char)((val >> (dat_len - *buf_len)) & ~(~0 << *buf_len));
+ buffer[*j] |= (unsigned char)((unsigned)(val >> (dat_len - *buf_len)) & ~((unsigned)(~0) << *buf_len));
dat_len -= *buf_len;
H5Z_scaleoffset_next_byte(j, buf_len);
if(dat_len == 0)
return;
- buffer[*j] = (unsigned char)((val & ~(~0 << dat_len)) << (*buf_len - dat_len));
+ buffer[*j] = (unsigned char)((val & ~((unsigned)(~0) << dat_len)) << (*buf_len - dat_len));
*buf_len -= dat_len;
} /* end else */
}
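
The bit-packing changes in the two routines above rebuild the `~(~0 << n)` masks from unsigned operands. Plain ~0 is a signed int with every bit set, and left-shifting a negative int is undefined behaviour in C (and mixes signedness when combined with the unsigned buffer bytes); starting from an unsigned all-ones value keeps each intermediate step well defined. For example:

    #include <stdio.h>

    /* Mask of the low nbits bits, built entirely from unsigned arithmetic.
     * Assumes 0 < nbits < the width of unsigned int (it is 1..8 in the filter). */
    static unsigned
    low_bits_mask(unsigned nbits)
    {
        return ~(~0u << nbits);
    }

    int main(void)
    {
        printf("0x%X 0x%X\n", low_bits_mask(3), low_bits_mask(8)); /* 0x7 0xFF */
        return 0;
    }
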
diff --git a/src/H5Ztrans.c b/src/H5Ztrans.c
index 2627a29..f30f0b2 100644
--- a/src/H5Ztrans.c
+++ b/src/H5Ztrans.c
@@ -124,8 +124,8 @@ static void H5Z_print(H5Z_node *tree, FILE *stream);
tree_val = ((RESR).type==H5Z_XFORM_INTEGER ? (double)(RESR).value.int_val : (RESR).value.float_val); \
p = (TYPE*)(RESL).value.dat_val; \
\
- for(u=0; u<(SIZE); u++) { \
- *p = *p OP tree_val; \
+ for(u = 0; u < (SIZE); u++) { \
+ *p = (TYPE)((double)*p OP tree_val); \
p++; \
} \
} \
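
The added casts make the implicit conversions in the transform explicit: tree_val is always a double, so the arithmetic happens in double and the result is then narrowed back to the element type deliberately rather than silently. Sketched for a single element type:

    #include <stddef.h>
    #include <stdio.h>

    /* Apply "x + tree_val" the way the macro above does for int data:
     * evaluate in double, then narrow back to the element type explicitly. */
    static void
    apply_offset_int(int *p, size_t n, double tree_val)
    {
        size_t u;

        for (u = 0; u < n; u++)
            p[u] = (int)((double)p[u] + tree_val);
    }

    int main(void)
    {
        int a[3] = {1, 2, 3};

        apply_offset_int(a, 3, 100.0);
        printf("%d %d %d\n", a[0], a[1], a[2]); /* 101 102 103 */
        return 0;
    }
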
@@ -141,32 +141,31 @@ static void H5Z_print(H5Z_node *tree, FILE *stream);
tree_val = ((RESL).type==H5Z_XFORM_INTEGER ? (double)(RESL).value.int_val : (RESL).value.float_val); \
\
p = (TYPE*)(RESR).value.dat_val; \
- for(u=0; u<(SIZE); u++) { \
- *p = tree_val OP *p; \
+ for(u = 0; u < (SIZE); u++) { \
+ *p = (TYPE)(tree_val OP (double)*p); \
p++; \
} \
} \
- else if( ((RESL).type == H5Z_XFORM_SYMBOL) && ((RESR).type==H5Z_XFORM_SYMBOL)) \
- { \
- TYPE* pl = (TYPE*)(RESL).value.dat_val; \
- TYPE* pr = (TYPE*)(RESR).value.dat_val; \
- \
- for(u=0; u<(SIZE); u++) { \
- *pl = *pl OP *pr; \
- pl++; pr++; \
- } \
- } \
- else \
+ else if( ((RESL).type == H5Z_XFORM_SYMBOL) && ((RESR).type == H5Z_XFORM_SYMBOL)) \
+ { \
+ TYPE* pl = (TYPE*)(RESL).value.dat_val; \
+ TYPE* pr = (TYPE*)(RESR).value.dat_val; \
+ \
+ for(u = 0; u < (SIZE); u++) { \
+ *pl = (TYPE)(*pl OP *pr); \
+ pl++; pr++; \
+ } \
+ } \
+ else \
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Unexpected type conversion operation") \
}
-#if H5_SIZEOF_LONG_DOUBLE !=0
+#if H5_SIZEOF_LONG_DOUBLE != 0
+#if CHAR_MIN >= 0
#define H5Z_XFORM_TYPE_OP(RESL,RESR,TYPE,OP,SIZE) \
{ \
if((TYPE) == H5T_NATIVE_CHAR) \
H5Z_XFORM_DO_OP1((RESL), (RESR), char, OP, (SIZE)) \
- else if((TYPE) == H5T_NATIVE_UCHAR) \
- H5Z_XFORM_DO_OP1((RESL), (RESR), unsigned char, OP, (SIZE)) \
else if((TYPE) == H5T_NATIVE_SCHAR) \
H5Z_XFORM_DO_OP1((RESL), (RESR), signed char, OP, (SIZE)) \
else if((TYPE) == H5T_NATIVE_SHORT) \
@@ -182,7 +181,7 @@ static void H5Z_print(H5Z_node *tree, FILE *stream);
else if((TYPE) == H5T_NATIVE_ULONG) \
H5Z_XFORM_DO_OP1((RESL), (RESR), unsigned long, OP, (SIZE)) \
else if((TYPE) == H5T_NATIVE_LLONG) \
- H5Z_XFORM_DO_OP1((RESL), (RESR), long long, OP, (SIZE)) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), long long, OP, (SIZE)) \
else if((TYPE) == H5T_NATIVE_ULLONG) \
H5Z_XFORM_DO_OP1((RESL), (RESR), unsigned long long, OP, (SIZE)) \
else if((TYPE) == H5T_NATIVE_FLOAT) \
@@ -192,13 +191,43 @@ static void H5Z_print(H5Z_node *tree, FILE *stream);
else if((TYPE) == H5T_NATIVE_LDOUBLE) \
H5Z_XFORM_DO_OP1((RESL), (RESR), long double, OP, (SIZE)) \
}
-#else
+#else /* CHAR_MIN >= 0 */
#define H5Z_XFORM_TYPE_OP(RESL,RESR,TYPE,OP,SIZE) \
{ \
if((TYPE) == H5T_NATIVE_CHAR) \
H5Z_XFORM_DO_OP1((RESL), (RESR), char, OP, (SIZE)) \
else if((TYPE) == H5T_NATIVE_UCHAR) \
H5Z_XFORM_DO_OP1((RESL), (RESR), unsigned char, OP, (SIZE)) \
+ else if((TYPE) == H5T_NATIVE_SHORT) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), short, OP, (SIZE)) \
+ else if((TYPE) == H5T_NATIVE_USHORT) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), unsigned short, OP, (SIZE)) \
+ else if((TYPE) == H5T_NATIVE_INT) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), int, OP, (SIZE)) \
+ else if((TYPE) == H5T_NATIVE_UINT) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), unsigned int, OP, (SIZE)) \
+ else if((TYPE) == H5T_NATIVE_LONG) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), long, OP, (SIZE)) \
+ else if((TYPE) == H5T_NATIVE_ULONG) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), unsigned long, OP, (SIZE)) \
+ else if((TYPE) == H5T_NATIVE_LLONG) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), long long, OP, (SIZE)) \
+ else if((TYPE) == H5T_NATIVE_ULLONG) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), unsigned long long, OP, (SIZE)) \
+ else if((TYPE) == H5T_NATIVE_FLOAT) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), float, OP, (SIZE)) \
+ else if((TYPE) == H5T_NATIVE_DOUBLE) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), double, OP, (SIZE)) \
+ else if((TYPE) == H5T_NATIVE_LDOUBLE) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), long double, OP, (SIZE)) \
+}
+#endif /* CHAR_MIN >= 0 */
+#else
+#if CHAR_MIN >= 0
+#define H5Z_XFORM_TYPE_OP(RESL,RESR,TYPE,OP,SIZE) \
+{ \
+ if((TYPE) == H5T_NATIVE_CHAR) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), char, OP, (SIZE)) \
else if((TYPE) == H5T_NATIVE_SCHAR) \
H5Z_XFORM_DO_OP1((RESL), (RESR), signed char, OP, (SIZE)) \
else if((TYPE) == H5T_NATIVE_SHORT) \
@@ -222,6 +251,35 @@ static void H5Z_print(H5Z_node *tree, FILE *stream);
else if((TYPE) == H5T_NATIVE_DOUBLE) \
H5Z_XFORM_DO_OP1((RESL), (RESR), double, OP, (SIZE)) \
}
+#else /* CHAR_MIN >= 0 */
+#define H5Z_XFORM_TYPE_OP(RESL,RESR,TYPE,OP,SIZE) \
+{ \
+ if((TYPE) == H5T_NATIVE_CHAR) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), char, OP, (SIZE)) \
+ else if((TYPE) == H5T_NATIVE_UCHAR) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), unsigned char, OP, (SIZE)) \
+ else if((TYPE) == H5T_NATIVE_SHORT) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), short, OP, (SIZE)) \
+ else if((TYPE) == H5T_NATIVE_USHORT) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), unsigned short, OP, (SIZE)) \
+ else if((TYPE) == H5T_NATIVE_INT) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), int, OP, (SIZE)) \
+ else if((TYPE) == H5T_NATIVE_UINT) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), unsigned int, OP, (SIZE)) \
+ else if((TYPE) == H5T_NATIVE_LONG) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), long, OP, (SIZE)) \
+ else if((TYPE) == H5T_NATIVE_ULONG) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), unsigned long, OP, (SIZE)) \
+ else if((TYPE) == H5T_NATIVE_LLONG) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), long long, OP, (SIZE)) \
+ else if((TYPE) == H5T_NATIVE_ULLONG) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), unsigned long long, OP, (SIZE)) \
+ else if((TYPE) == H5T_NATIVE_FLOAT) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), float, OP, (SIZE)) \
+ else if((TYPE) == H5T_NATIVE_DOUBLE) \
+ H5Z_XFORM_DO_OP1((RESL), (RESR), double, OP, (SIZE)) \
+}
+#endif /* CHAR_MIN >= 0 */
#endif /*H5_SIZEOF_LONG_DOUBLE */
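
The new CHAR_MIN >= 0 branches choose the type list at preprocessing time according to whether plain char is unsigned (CHAR_MIN == 0) or signed (CHAR_MIN < 0). H5T_NATIVE_CHAR resolves to either the signed or the unsigned char type on a given platform, so the comparison that would duplicate it is dropped on each side instead of being left as an unreachable branch. The signedness test itself is just:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        /* CHAR_MIN is 0 exactly when plain char is an unsigned type */
    #if CHAR_MIN >= 0
        puts("plain char is unsigned on this platform");
    #else
        puts("plain char is signed on this platform");
    #endif
        return 0;
    }
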
#define H5Z_XFORM_DO_OP3(OP) \
@@ -977,17 +1035,20 @@ H5Z_xform_eval(H5Z_data_xform_t *data_xform_prop, void* array, size_t array_size
if(tree->type == H5Z_XFORM_INTEGER || tree->type == H5Z_XFORM_FLOAT) {
if(array_type == H5T_NATIVE_CHAR)
H5Z_XFORM_DO_OP5(char, array_size)
- else if(array_type == H5T_NATIVE_UCHAR)
- H5Z_XFORM_DO_OP5(unsigned char, array_size)
+#if CHAR_MIN >= 0
else if(array_type == H5T_NATIVE_SCHAR)
H5Z_XFORM_DO_OP5(signed char, array_size)
+#else /* CHAR_MIN >= 0 */
+ else if(array_type == H5T_NATIVE_UCHAR)
+ H5Z_XFORM_DO_OP5(unsigned char, array_size)
+#endif /* CHAR_MIN >= 0 */
else if(array_type == H5T_NATIVE_SHORT)
H5Z_XFORM_DO_OP5(short, array_size)
else if(array_type == H5T_NATIVE_USHORT)
H5Z_XFORM_DO_OP5(unsigned short, array_size)
else if(array_type == H5T_NATIVE_INT)
H5Z_XFORM_DO_OP5(int, array_size)
- else if(array_type == H5T_NATIVE_UINT)
+ else if(array_type == H5T_NATIVE_UINT)
H5Z_XFORM_DO_OP5(unsigned int, array_size)
else if(array_type == H5T_NATIVE_LONG)
H5Z_XFORM_DO_OP5(long, array_size)
@@ -1106,34 +1167,32 @@ H5Z_xform_eval_full(H5Z_node *tree, const size_t array_size, const hid_t array_
* 1. See if "x" is on left hand side, right hand side, or if both sides are "x"
* 2. Figure out what type of data we're going to be manipulating
* 3. Do the operation on the data. */
-
-
switch (tree->type) {
case H5Z_XFORM_PLUS:
- H5Z_XFORM_TYPE_OP(resl, resr, array_type, +, array_size)
- break;
+ H5Z_XFORM_TYPE_OP(resl, resr, array_type, +, array_size)
+ break;
case H5Z_XFORM_MINUS:
- H5Z_XFORM_TYPE_OP(resl, resr, array_type, -, array_size)
- break;
+ H5Z_XFORM_TYPE_OP(resl, resr, array_type, -, array_size)
+ break;
case H5Z_XFORM_MULT:
- H5Z_XFORM_TYPE_OP(resl, resr, array_type, *, array_size)
- break;
+ H5Z_XFORM_TYPE_OP(resl, resr, array_type, *, array_size)
+ break;
case H5Z_XFORM_DIVIDE:
- H5Z_XFORM_TYPE_OP(resl, resr, array_type, /, array_size)
- break;
+ H5Z_XFORM_TYPE_OP(resl, resr, array_type, /, array_size)
+ break;
- case H5Z_XFORM_ERROR:
- case H5Z_XFORM_INTEGER:
- case H5Z_XFORM_FLOAT:
- case H5Z_XFORM_SYMBOL:
- case H5Z_XFORM_LPAREN:
- case H5Z_XFORM_RPAREN:
- case H5Z_XFORM_END:
+ case H5Z_XFORM_ERROR:
+ case H5Z_XFORM_INTEGER:
+ case H5Z_XFORM_FLOAT:
+ case H5Z_XFORM_SYMBOL:
+ case H5Z_XFORM_LPAREN:
+ case H5Z_XFORM_RPAREN:
+ case H5Z_XFORM_END:
default:
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid expression tree")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid expression tree")
} /* end switch */
/* The result stores a pointer to the new data */
diff --git a/src/H5detect.c b/src/H5detect.c
index ee02c52..94e841e 100644
--- a/src/H5detect.c
+++ b/src/H5detect.c
@@ -1177,7 +1177,7 @@ imp_bit(unsigned int n, int *perm, void *_a, void *_b, const unsigned char *pad_
*
*-------------------------------------------------------------------------
*/
-static unsigned int
+H5_ATTR_PURE static unsigned int
find_bias(unsigned int epos, unsigned int esize, int *perm, void *_a)
{
unsigned char *a = (unsigned char *) _a;
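
H5_ATTR_PURE marks find_bias() as having no side effects beyond reading its arguments and memory, which lets the compiler fold or reorder repeated calls with identical arguments. A sketch of how such an attribute macro is typically defined and used; the names here are illustrative rather than HDF5's:

    #include <stdio.h>

    /* Expand to GCC/Clang's pure attribute where available, to nothing elsewhere. */
    #if defined(__GNUC__)
    #  define ATTR_PURE __attribute__((pure))
    #else
    #  define ATTR_PURE
    #endif

    ATTR_PURE static unsigned
    count_set_bits(const unsigned char *buf, unsigned n)
    {
        unsigned total = 0, i;

        for (i = 0; i < n; i++) {
            unsigned char b = buf[i];

            while (b) {
                b &= (unsigned char)(b - 1); /* clear the lowest set bit */
                total++;
            }
        }
        return total;
    }

    int main(void)
    {
        unsigned char data[2] = {0xF0, 0x01};

        printf("%u\n", count_set_bits(data, 2)); /* 5 */
        return 0;
    }
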
diff --git a/src/H5private.h b/src/H5private.h
index d400469..6a677bb 100644
--- a/src/H5private.h
+++ b/src/H5private.h
@@ -657,6 +657,9 @@ typedef struct {
#ifndef HDatol
#define HDatol(S) atol(S)
#endif /* HDatol */
+#ifndef HDatoll
+ #define HDatoll(S) atoll(S)
+#endif /* HDatoll */
#ifndef HDbsearch
#define HDbsearch(K,B,N,Z,F) bsearch(K,B,N,Z,F)
#endif /* HDbsearch */
@@ -1029,6 +1032,15 @@ typedef off_t h5_stat_size_t;
#ifndef HDlink
#define HDlink(OLD,NEW) link(OLD,NEW)
#endif /* HDlink */
+#ifndef HDllround
+ #define HDllround(V) llround(V)
+#endif /* HDllround */
+#ifndef HDllroundf
+ #define HDllroundf(V) llroundf(V)
+#endif /* HDllroundf */
+#ifndef HDllroundl
+ #define HDllroundl(V) llroundl(V)
+#endif /* HDllroundl */
#ifndef HDlocaleconv
#define HDlocaleconv() localeconv()
#endif /* HDlocaleconv */
@@ -1044,6 +1056,15 @@ typedef off_t h5_stat_size_t;
#ifndef HDlongjmp
#define HDlongjmp(J,N) longjmp(J,N)
#endif /* HDlongjmp */
+#ifndef HDlround
+ #define HDlround(V) lround(V)
+#endif /* HDlround */
+#ifndef HDlroundf
+ #define HDlroundf(V) lroundf(V)
+#endif /* HDlroundf */
+#ifndef HDlroundl
+ #define HDlroundl(V) lroundl(V)
+#endif /* HDlroundl */
#ifndef HDlseek
#define HDlseek(F,O,W) lseek(F,O,W)
#endif /* HDlseek */
@@ -1118,6 +1139,9 @@ typedef off_t h5_stat_size_t;
#ifndef HDpow
#define HDpow(X,Y) pow(X,Y)
#endif /* HDpow */
+#ifndef HDpowf
+ #define HDpowf(X,Y) powf(X,Y)
+#endif /* HDpowf */
/* printf() variable arguments */
#ifndef HDputc
#define HDputc(C,F) putc(C,F)
@@ -1180,6 +1204,15 @@ typedef off_t h5_stat_size_t;
#ifndef HDrewinddir
#define HDrewinddir(D) rewinddir(D)
#endif /* HDrewinddir */
+#ifndef HDround
+ #define HDround(V) round(V)
+#endif /* HDround */
+#ifndef HDroundf
+ #define HDroundf(V) roundf(V)
+#endif /* HDroundf */
+#ifndef HDroundl
+ #define HDroundl(V) roundl(V)
+#endif /* HDroundl */
#ifndef HDrmdir
#define HDrmdir(S) rmdir(S)
#endif /* HDrmdir */
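
The new HDatoll, HDround, HDlround, HDllround and HDpowf entries follow the existing convention in this header: every libc/libm call goes through an HD-prefixed macro that a platform-specific header may pre-define, and that otherwise falls through to the standard function. A stripped-down sketch of that override pattern with illustrative names:

    #include <math.h>
    #include <stdio.h>

    /* A platform header may define MY_HDround first; otherwise use libm. */
    #ifndef MY_HDround
    #  define MY_HDround(V)  round(V)
    #endif
    #ifndef MY_HDlround
    #  define MY_HDlround(V) lround(V)
    #endif

    int main(void)
    {
        printf("%.0f %ld\n", MY_HDround(2.5), MY_HDlround(-2.5)); /* 3 -3 */
        return 0;
    }
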
diff --git a/src/H5public.h b/src/H5public.h
index 554ad26..2f21648 100644
--- a/src/H5public.h
+++ b/src/H5public.h
@@ -194,28 +194,21 @@ H5_GCC_DIAG_ON(long-long)
/*
* File addresses have their own types.
*/
-#if H5_SIZEOF_INT64_T>=8
- typedef uint64_t haddr_t;
-# define HADDR_UNDEF ((haddr_t)(int64_t)(-1))
-# define H5_SIZEOF_HADDR_T H5_SIZEOF_INT64_T
-# ifdef H5_HAVE_PARALLEL
-# define HADDR_AS_MPI_TYPE MPI_LONG_LONG_INT
-# endif /* H5_HAVE_PARALLEL */
-#elif H5_SIZEOF_INT>=8
+#if H5_SIZEOF_INT >= 8
typedef unsigned haddr_t;
# define HADDR_UNDEF ((haddr_t)(-1))
# define H5_SIZEOF_HADDR_T H5_SIZEOF_INT
# ifdef H5_HAVE_PARALLEL
# define HADDR_AS_MPI_TYPE MPI_UNSIGNED
# endif /* H5_HAVE_PARALLEL */
-#elif H5_SIZEOF_LONG>=8
+#elif H5_SIZEOF_LONG >= 8
typedef unsigned long haddr_t;
# define HADDR_UNDEF ((haddr_t)(long)(-1))
# define H5_SIZEOF_HADDR_T H5_SIZEOF_LONG
# ifdef H5_HAVE_PARALLEL
# define HADDR_AS_MPI_TYPE MPI_UNSIGNED_LONG
# endif /* H5_HAVE_PARALLEL */
-#elif H5_SIZEOF_LONG_LONG>=8
+#elif H5_SIZEOF_LONG_LONG >= 8
typedef unsigned long long haddr_t;
# define HADDR_UNDEF ((haddr_t)(long long)(-1))
# define H5_SIZEOF_HADDR_T H5_SIZEOF_LONG_LONG
@@ -225,11 +218,11 @@ H5_GCC_DIAG_ON(long-long)
#else
# error "nothing appropriate for haddr_t"
#endif
-#if H5_SIZEOF_HADDR_T ==H5_SIZEOF_INT
+#if H5_SIZEOF_HADDR_T == H5_SIZEOF_INT
# define H5_PRINTF_HADDR_FMT "%u"
-#elif H5_SIZEOF_HADDR_T ==H5_SIZEOF_LONG
+#elif H5_SIZEOF_HADDR_T == H5_SIZEOF_LONG
# define H5_PRINTF_HADDR_FMT "%lu"
-#elif H5_SIZEOF_HADDR_T ==H5_SIZEOF_LONG_LONG
+#elif H5_SIZEOF_HADDR_T == H5_SIZEOF_LONG_LONG
# define H5_PRINTF_HADDR_FMT "%" H5_PRINTF_LL_WIDTH "u"
#else
# error "nothing appropriate for H5_PRINTF_HADDR_FMT"
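
The two blocks above first pick an unsigned integer type wide enough to hold file addresses and then a printf format that matches whichever type was chosen, so haddr_t values can be printed portably. The same pairing, expressed with <stdint.h>/<inttypes.h> fixed-width types purely as an illustration (these are not the HDF5 definitions):

    #include <inttypes.h>
    #include <stdio.h>

    typedef uint64_t my_addr_t;                /* stand-in for haddr_t */
    #define MY_ADDR_UNDEF      ((my_addr_t)-1) /* stand-in for HADDR_UNDEF */
    #define MY_PRINTF_ADDR_FMT "%" PRIu64      /* stand-in for H5_PRINTF_HADDR_FMT */

    int main(void)
    {
        my_addr_t addr = 4096;

        printf("addr  = " MY_PRINTF_ADDR_FMT "\n", addr);
        printf("undef = " MY_PRINTF_ADDR_FMT "\n", MY_ADDR_UNDEF);
        return 0;
    }
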