author    Binh-Minh Ribler <bmribler@hdfgroup.org>    2017-03-03 14:50:04 (GMT)
committer Binh-Minh Ribler <bmribler@hdfgroup.org>    2017-03-03 14:50:04 (GMT)
commit    8f5a1f43d0cf527488be2b2805bac2b6c86b8c66 (patch)
tree      1e7406feedc42fff560ca99d7ca5850672c68cf5 /src
parent    861a849530df32d9f1a495eb8b82877198dab7de (diff)
parent    0df90f9f796d543549a30a648e864932126b6f52 (diff)
Merge branch 'develop' of https://bitbucket.hdfgroup.org/scm/~bmribler/hdf5_bmr_cpp into develop
Diffstat (limited to 'src')
-rw-r--r--  src/H5AC.c         12
-rw-r--r--  src/H5ACmpio.c     26
-rw-r--r--  src/H5C.c         697
-rw-r--r--  src/H5Cdbg.c       18
-rw-r--r--  src/H5Cimage.c    896
-rw-r--r--  src/H5Cmpio.c     101
-rw-r--r--  src/H5Cpkg.h      107
-rw-r--r--  src/H5Cprivate.h    3
-rw-r--r--  src/H5Cquery.c      3
-rw-r--r--  src/H5Fint.c       16
-rw-r--r--  src/H5Fsuper.c     19
-rw-r--r--  src/H5HFcache.c     4
-rw-r--r--  src/H5MF.c        133
-rw-r--r--  src/H5MFprivate.h   8
14 files changed, 972 insertions(+), 1071 deletions(-)
diff --git a/src/H5AC.c b/src/H5AC.c
index 3710746..ee68a6f 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -616,12 +616,18 @@ H5AC_dest(H5F_t *f, hid_t dxpl_id)
#ifdef H5_HAVE_PARALLEL
if(aux_ptr != NULL) {
- if(aux_ptr->d_slist_ptr != NULL)
+ if(aux_ptr->d_slist_ptr != NULL) {
+ HDassert(H5SL_count(aux_ptr->d_slist_ptr) == 0);
H5SL_close(aux_ptr->d_slist_ptr);
- if(aux_ptr->c_slist_ptr != NULL)
+ } /* end if */
+ if(aux_ptr->c_slist_ptr != NULL) {
+ HDassert(H5SL_count(aux_ptr->c_slist_ptr) == 0);
H5SL_close(aux_ptr->c_slist_ptr);
- if(aux_ptr->candidate_slist_ptr != NULL)
+ } /* end if */
+ if(aux_ptr->candidate_slist_ptr != NULL) {
+ HDassert(H5SL_count(aux_ptr->candidate_slist_ptr) == 0);
H5SL_close(aux_ptr->candidate_slist_ptr);
+ } /* end if */
aux_ptr->magic = 0;
aux_ptr = H5FL_FREE(H5AC_aux_t, aux_ptr);
} /* end if */
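
The H5AC.c hunk above adds an emptiness assertion before each skip list is closed. Below is a minimal standalone sketch of that assert-before-teardown pattern; the list type and helper functions are hypothetical stand-ins for the real H5SL API, not HDF5 code.

    #include <assert.h>
    #include <stdlib.h>

    /* Hypothetical list type standing in for H5SL_t */
    typedef struct list_t { size_t count; } list_t;

    static size_t list_count(const list_t *l) { return l->count; }
    static void   list_close(list_t *l)       { free(l); }

    /* Close a list the caller believes has already been drained.
     * The assert documents the invariant and, in debug builds,
     * catches leftover entries instead of silently leaking them. */
    static void close_drained_list(list_t *l)
    {
        if (l != NULL) {
            assert(list_count(l) == 0);
            list_close(l);
        }
    }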
diff --git a/src/H5ACmpio.c b/src/H5ACmpio.c
index cccb3bd..945aaba 100644
--- a/src/H5ACmpio.c
+++ b/src/H5ACmpio.c
@@ -81,7 +81,7 @@ typedef struct H5AC_addr_list_ud_t
{
H5AC_aux_t * aux_ptr; /* 'Auxiliary' parallel cache info */
haddr_t * addr_buf_ptr; /* Array to store addresses */
- int i; /* Counter for position in array */
+ unsigned u; /* Counter for position in array */
} H5AC_addr_list_ud_t;
@@ -310,13 +310,13 @@ H5AC__broadcast_candidate_list(H5AC_t *cache_ptr, unsigned *num_entries_ptr,
* receivers can set up buffers to receive them. If there aren't
* any, we are done.
*/
- num_entries = H5SL_count(aux_ptr->candidate_slist_ptr);
+ num_entries = (unsigned)H5SL_count(aux_ptr->candidate_slist_ptr);
if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_UNSIGNED, 0, aux_ptr->mpi_comm)))
HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
if(num_entries > 0) {
- size_t buf_size = 0;
- unsigned chk_num_entries = 0;
+ size_t buf_size = 0;
+ unsigned chk_num_entries = 0;
/* convert the candidate list into the format we
* are used to receiving from process 0, and also load it
@@ -378,8 +378,8 @@ H5AC__broadcast_clean_list_cb(void *_item, void H5_ATTR_UNUSED *_key,
/* Store the entry's address in the buffer */
addr = slist_entry_ptr->addr;
- udata->addr_buf_ptr[udata->i] = addr;
- udata->i++;
+ udata->addr_buf_ptr[udata->u] = addr;
+ udata->u++;
/* now release the entry */
slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
@@ -437,7 +437,7 @@ H5AC__broadcast_clean_list(H5AC_t * cache_ptr)
* receives can set up a buffer to receive them. If there aren't
* any, we are done.
*/
- num_entries = H5SL_count(aux_ptr->c_slist_ptr);
+ num_entries = (unsigned)H5SL_count(aux_ptr->c_slist_ptr);
if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_UNSIGNED, 0, aux_ptr->mpi_comm)))
HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
@@ -453,7 +453,7 @@ H5AC__broadcast_clean_list(H5AC_t * cache_ptr)
/* Set up user data for callback */
udata.aux_ptr = aux_ptr;
udata.addr_buf_ptr = addr_buf_ptr;
- udata.i = 0;
+ udata.u = 0;
/* Free all the clean list entries, building the address list in the callback */
/* (Callback also removes the matching entries from the dirtied list) */
@@ -568,8 +568,8 @@ H5AC__copy_candidate_list_to_buffer_cb(void *_item, void H5_ATTR_UNUSED *_key,
HDassert(udata);
/* Store the entry's address in the buffer */
- udata->addr_buf_ptr[udata->i] = slist_entry_ptr->addr;
- udata->i++;
+ udata->addr_buf_ptr[udata->u] = slist_entry_ptr->addr;
+ udata->u++;
/* now release the entry */
slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
@@ -635,7 +635,7 @@ H5AC__copy_candidate_list_to_buffer(const H5AC_t *cache_ptr, unsigned *num_entri
HDassert(haddr_buf_ptr_ptr != NULL);
HDassert(*haddr_buf_ptr_ptr == NULL);
- num_entries = (int)H5SL_count(aux_ptr->candidate_slist_ptr);
+ num_entries = (unsigned)H5SL_count(aux_ptr->candidate_slist_ptr);
/* allocate a buffer(s) to store the list of candidate entry
* base addresses in
@@ -647,7 +647,7 @@ H5AC__copy_candidate_list_to_buffer(const H5AC_t *cache_ptr, unsigned *num_entri
/* Set up user data for callback */
udata.aux_ptr = aux_ptr;
udata.addr_buf_ptr = haddr_buf_ptr;
- udata.i = 0;
+ udata.u = 0;
/* Free all the candidate list entries, building the address list in the callback */
if(H5SL_free(aux_ptr->candidate_slist_ptr, H5AC__copy_candidate_list_to_buffer_cb, &udata) < 0)
@@ -1543,7 +1543,7 @@ H5AC__receive_and_apply_clean_list(H5F_t *f, hid_t dxpl_id)
if(num_entries > 0)
/* mark the indicated entries as clean */
- if(H5C_mark_entries_as_clean(f, dxpl_id, (int32_t)num_entries, haddr_buf_ptr) < 0)
+ if(H5C_mark_entries_as_clean(f, dxpl_id, num_entries, haddr_buf_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't mark entries clean.")
/* if it is defined, call the sync point done callback. Note
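
The H5ACmpio.c changes above switch the list-position counter and entry counts from int to unsigned so they match the MPI_UNSIGNED datatype used in the broadcasts. Here is a standalone sketch of the count-then-payload broadcast pattern with hypothetical names (the real code broadcasts haddr_t addresses collected from the skip lists):

    #include <mpi.h>
    #include <stdlib.h>

    /* Broadcast a variable-length address list from rank 0: first the
     * count (as unsigned, matching MPI_UNSIGNED), then the payload.
     * Rank 0 supplies the populated buffer in *addrs; other ranks
     * allocate one sized from the received count. */
    static int bcast_addr_list(unsigned *num, unsigned long long **addrs,
                               MPI_Comm comm)
    {
        int rank;

        MPI_Comm_rank(comm, &rank);
        if (MPI_Bcast(num, 1, MPI_UNSIGNED, 0, comm) != MPI_SUCCESS)
            return -1;
        if (*num == 0)
            return 0;                   /* nothing to transfer */
        if (rank != 0)
            *addrs = malloc(*num * sizeof(**addrs));
        if (*addrs == NULL)
            return -1;
        return (MPI_Bcast(*addrs, (int)*num, MPI_UNSIGNED_LONG_LONG,
                          0, comm) == MPI_SUCCESS) ? 0 : -1;
    }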
diff --git a/src/H5C.c b/src/H5C.c
index ba23b50..805b4f5 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -159,6 +159,10 @@ static herr_t H5C__mark_flush_dep_dirty(H5C_cache_entry_t * entry);
static herr_t H5C__mark_flush_dep_clean(H5C_cache_entry_t * entry);
+static herr_t H5C__serialize_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring);
+static herr_t H5C__serialize_single_entry(H5F_t *f, hid_t dxpl_id,
+ H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr);
+
static herr_t H5C__verify_len_eoa(H5F_t *f, const H5C_class_t * type,
haddr_t addr, size_t *len, hbool_t actual);
@@ -750,20 +754,10 @@ H5C_prep_for_file_close(H5F_t *f, hid_t dxpl_id)
/* Make certain there aren't any protected entries */
HDassert(cache_ptr->pl_len == 0);
- /* If the file is opened and closed without any access to
- * any group or dataset, it is possible that the cache image (if
- * it exists) has not been read yet. Do this now if required.
- */
- if(cache_ptr->load_image) {
- cache_ptr->load_image = FALSE;
- if(H5C__load_cache_image(f, dxpl_id) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "Can't load cache image")
- } /* end if */
+ /* Prepare cache image */
+ if(H5C__prep_image_for_file_close(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create cache image")
- /* Generate the cache image, if requested */
- if(cache_ptr->image_ctl.generate_image)
- if(H5C__prep_image_for_file_close(f, dxpl_id) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "Can't create cache image")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -1086,9 +1080,9 @@ H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
*/
ring = H5C_RING_USER;
while(ring < H5C_RING_NTYPES) {
- /* only call the free space manager settle routines when close
- * warning has been received, and then only when the index is
- * non-empty for that ring.
+
+ /* Only call the free space manager settle routines when close
+ * warning has been received.
*/
if(cache_ptr->close_warning_received) {
switch(ring) {
@@ -1096,36 +1090,20 @@ H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
break;
case H5C_RING_RDFSM:
- if(!cache_ptr->rdfsm_settled) {
- hbool_t fsm_settled = FALSE; /* Whether the FSM was actually settled */
-
- /* Settle raw data FSM */
- if(H5MF_settle_raw_data_fsm(f, dxpl_id, &fsm_settled) < 0)
+ /* Settle raw data FSM */
+ if(!cache_ptr->rdfsm_settled)
+ if(H5MF_settle_raw_data_fsm(f, dxpl_id, &cache_ptr->rdfsm_settled) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed")
-
- /* Only set the flag if the FSM was actually settled */
- if(fsm_settled)
- cache_ptr->rdfsm_settled = TRUE;
- } /* end if */
break;
case H5C_RING_MDFSM:
- if(!cache_ptr->mdfsm_settled) {
- hbool_t fsm_settled = FALSE; /* Whether the FSM was actually settled */
-
- /* Settle metadata FSM */
- if(H5MF_settle_meta_data_fsm(f, dxpl_id, &fsm_settled) < 0)
+ /* Settle metadata FSM */
+ if(!cache_ptr->mdfsm_settled)
+ if(H5MF_settle_meta_data_fsm(f, dxpl_id, &cache_ptr->mdfsm_settled) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed")
-
- /* Only set the flag if the FSM was actually settled */
- if(fsm_settled)
- cache_ptr->mdfsm_settled = TRUE;
- } /* end if */
break;
case H5C_RING_SBE:
- break;
-
case H5C_RING_SB:
break;
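
The simplified switch above works because the settle routines now take the cached flag directly and set it only when they actually settle the free space manager, so the caller-side temporary and copy-back step disappear. A small sketch of that in/out-flag contract, with hypothetical names:

    #include <stdbool.h>

    /* Post-refactor contract: the settle routine receives the cached
     * flag directly and sets it only when it actually settles the
     * free space manager. */
    static int settle_fsm(bool *settled /* in/out */)
    {
        bool did_settle = false;

        /* ... attempt the real settling work; set did_settle on success ... */

        if (did_settle)
            *settled = true;
        return 0;
    }

    /* Caller side, mirroring the switch above */
    static int flush_ring_fsm(bool *rdfsm_settled)
    {
        if (!*rdfsm_settled)            /* skip if already settled */
            if (settle_fsm(rdfsm_settled) < 0)
                return -1;
        return 0;
    }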
@@ -1470,6 +1448,11 @@ H5C_insert_entry(H5F_t * f,
entry_ptr->aux_next = NULL;
entry_ptr->aux_prev = NULL;
+#ifdef H5_HAVE_PARALLEL
+ entry_ptr->coll_next = NULL;
+ entry_ptr->coll_prev = NULL;
+#endif /* H5_HAVE_PARALLEL */
+
/* initialize cache image related fields */
entry_ptr->include_in_image = FALSE;
entry_ptr->lru_rank = 0;
@@ -1486,11 +1469,6 @@ H5C_insert_entry(H5F_t * f,
entry_ptr->serialization_count = 0;
#endif /* NDEBUG */
-#ifdef H5_HAVE_PARALLEL
- entry_ptr->coll_next = NULL;
- entry_ptr->coll_prev = NULL;
-#endif /* H5_HAVE_PARALLEL */
-
/* Apply tag to newly inserted entry */
if(H5C__tag_entry(cache_ptr, entry_ptr, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry")
@@ -1556,7 +1534,7 @@ H5C_insert_entry(H5F_t * f,
if(H5C__make_space_in_cache(f, dxpl_id, space_needed, write_permitted) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__make_space_in_cache failed")
- }
+ } /* end if */
H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
@@ -2542,6 +2520,8 @@ H5C_protect(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't load entry")
entry_ptr = (H5C_cache_entry_t *)thing;
+ cache_ptr->entries_loaded_counter++;
+
entry_ptr->ring = ring;
#ifdef H5_HAVE_PARALLEL
if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI) && entry_ptr->coll_access)
@@ -2632,7 +2612,7 @@ H5C_protect(H5F_t * f,
if(H5C__make_space_in_cache(f, dxpl_id, space_needed, write_permitted) < 0 )
HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed")
- }
+ } /* end if */
/* Insert the entry in the hash table. It can't be dirty yet, so
* we don't even check to see if it should go in the skip list.
@@ -2667,13 +2647,10 @@ H5C_protect(H5F_t * f,
*/
H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, NULL)
- /* Update entries loaded in cache counter */
- cache_ptr->entries_loaded_counter++;
-
/* Record that the entry was loaded, to trigger a notify callback later */
/* (After the entry is fully added to the cache) */
was_loaded = TRUE;
- }
+ } /* end else */
HDassert(entry_ptr->addr == addr);
HDassert(entry_ptr->type == type);
@@ -3490,23 +3467,20 @@ done:
*
* Function: H5C_unsettle_entry_ring
*
- * Purpose: Advise the metadata cache that the specified entry's metadata
- * cache manager ring is no longer settled (if it was on entry).
+ * Purpose: Advise the metadata cache that the specified entry's free space
+ * manager ring is no longer settled (if it was on entry).
*
- * If the target metadata cache manager ring is already
+ * If the target free space manager ring is already
* unsettled, do nothing, and return SUCCEED.
*
- * If the target metadata cache manager ring is settled, and
+ * If the target free space manager ring is settled, and
* we are not in the process of a file shutdown, mark
* the ring as unsettled, and return SUCCEED.
*
- * If the target metadata cache manager is settled, and we
+ * If the target free space manager is settled, and we
* are in the process of a file shutdown, post an error
* message, and return FAIL.
*
- * Note that this function simply passes the call on to
- * the metadata cache proper, and returns the result.
- *
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
@@ -4565,19 +4539,6 @@ done:
*
* Programmer: John Mainzer, 11/22/04
*
- * Changes: Modified function to detect deletions of entries
- * during a scan of the LRU, and where appropriate,
- * restart the scan to avoid proceeding with a next
- * entry that is no longer in the cache.
- *
- * Note the absence of checks after flushes of clean
- * entries. As a second entry can only be removed by
- * by a call to the pre_serialize or serialize callback
- * of the first, and as these callbacks will not be called
- * on clean entries, no checks are needed.
- *
- * JRM -- 4/6/15
- *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -4669,39 +4630,26 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
}
- if ( prev_ptr != NULL ) {
-
+ if(prev_ptr != NULL) {
if(corked) /* dirty corked entry is skipped */
entry_ptr = prev_ptr;
-
- else if ( ( restart_scan )
- ||
- ( prev_ptr->is_dirty != prev_is_dirty )
- ||
- ( prev_ptr->next != next_ptr )
- ||
- ( prev_ptr->is_protected )
- ||
- ( prev_ptr->is_pinned ) ) {
-
- /* something has happened to the LRU -- start over
+ else if(restart_scan || (prev_ptr->is_dirty != prev_is_dirty)
+ || (prev_ptr->next != next_ptr)
+ || (prev_ptr->is_protected)
+ || (prev_ptr->is_pinned)) {
+ /* Something has happened to the LRU -- start over
* from the tail.
*/
restart_scan = FALSE;
entry_ptr = cache_ptr->LRU_tail_ptr;
H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
-
- } else {
-
+ } /* end else-if */
+ else
entry_ptr = prev_ptr;
-
- }
- } else {
-
+ } /* end if */
+ else
entry_ptr = NULL;
-
- }
} /* end while */
/* for now at least, don't bother to maintain the minimum clean size,
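
The reworked eviction loop above snapshots the predecessor's state (dirty flag and next pointer) before each flush and restarts the scan from the live tail whenever the snapshot no longer matches or the restart flag was raised. A simplified standalone sketch of that idiom on a generic doubly linked list (not the real cache structures):

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct node { struct node *next, *prev; bool dirty; } node;

    /* Walk tail-to-head, calling visit() on each node. visit() may
     * mutate the list; it sets *restart when it removed nodes, and we
     * also compare a snapshot of the predecessor taken before the
     * call. On any sign of change, resume from the re-read live tail. */
    static void scan_with_restart(node **tailp, bool *restart,
                                  void (*visit)(node *, bool *))
    {
        node *cur = *tailp;

        while (cur != NULL) {
            node *prev       = cur->prev;
            bool  prev_dirty = (prev != NULL) && prev->dirty;
            node *prev_next  = (prev != NULL) ? prev->next : NULL;

            visit(cur, restart);

            if (prev == NULL)
                cur = NULL;                  /* reached the head */
            else if (*restart || prev->dirty != prev_dirty
                     || prev->next != prev_next) {
                *restart = false;
                cur = *tailp;                /* list changed: start over */
            }
            else
                cur = prev;                  /* list stable: keep walking */
        }
    }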
@@ -5340,7 +5288,7 @@ H5C_flush_invalidate_ring(H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
H5C_cache_entry_t *entry_ptr = NULL;
H5C_cache_entry_t *next_entry_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
- int64_t initial_slist_len = 0;
+ uint32_t initial_slist_len = 0;
size_t initial_slist_size = 0;
#endif /* H5C_DO_SANITY_CHECKS */
herr_t ret_value = SUCCEED;
@@ -5574,8 +5522,8 @@ H5C_flush_invalidate_ring(H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
*/
if(node_ptr == NULL) {
- HDassert(cache_ptr->slist_len == (initial_slist_len + cache_ptr->slist_len_increase));
- HDassert((int64_t)cache_ptr->slist_size == ((int64_t)initial_slist_size + cache_ptr->slist_size_increase));
+ HDassert(cache_ptr->slist_len == (uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase));
+ HDassert(cache_ptr->slist_size == (size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase));
} /* end if */
#endif /* H5C_DO_SANITY_CHECKS */
@@ -5777,7 +5725,7 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
H5C_cache_entry_t * entry_ptr = NULL;
H5C_cache_entry_t * next_entry_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
- int64_t initial_slist_len = 0;
+ uint32_t initial_slist_len = 0;
size_t initial_slist_size = 0;
#endif /* H5C_DO_SANITY_CHECKS */
int i;
@@ -5982,8 +5930,8 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
#if H5C_DO_SANITY_CHECKS
/* Verify that the slist size and length are as expected. */
- HDassert((initial_slist_len + cache_ptr->slist_len_increase) == cache_ptr->slist_len);
- HDassert((size_t)((int64_t)initial_slist_size + cache_ptr->slist_size_increase) == cache_ptr->slist_size);
+ HDassert((uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase) == cache_ptr->slist_len);
+ HDassert((size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase) == cache_ptr->slist_size);
#endif /* H5C_DO_SANITY_CHECKS */
} /* while */
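
The reworked sanity checks above keep the running totals unsigned (uint32_t and size_t) while the increases recorded during the flush remain signed, so the expected value is computed in signed arithmetic and cast back. In isolation the idiom looks like this:

    #include <assert.h>
    #include <stdint.h>

    /* The list length is unsigned, but the delta accumulated during a
     * flush can be negative; apply it in signed arithmetic, then cast
     * the result back for the comparison. */
    static void check_len(uint32_t initial_len, int32_t delta,
                          uint32_t current_len)
    {
        assert(current_len == (uint32_t)((int32_t)initial_len + delta));
    }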
@@ -6929,38 +6877,6 @@ done:
*
* Programmer: John Mainzer, 5/14/04
*
- * Changes: Modified function to skip over entries with the
- * flush_in_progress flag set. If this is not done,
- * an infinite recursion is possible if the cache is
- * full, and the pre-serialize or serialize routine
- * attempts to load another entry.
- *
- * This error was exposed by a re-factor of the
- * H5C__flush_single_entry() routine. However, it was
- * a potential bug from the moment that entries were
- * allowed to load other entries on flush.
- *
- * In passing, note that the primary and secondary dxpls
- * mentioned in the comment above have been replaced by
- * a single dxpl at some point, and thus the discussion
- * above is somewhat obsolete. Date of this change is
- * unkown.
- *
- * JRM -- 12/26/14
- *
- * Modified function to detect deletions of entries
- * during a scan of the LRU, and where appropriate,
- * restart the scan to avoid proceeding with a next
- * entry that is no longer in the cache.
- *
- * Note the absence of checks after flushes of clean
- * entries. As a second entry can only be removed by
- * by a call to the pre_serialize or serialize callback
- * of the first, and as these callbacks will not be called
- * on clean entries, no checks are needed.
- *
- * JRM -- 4/6/15
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -6993,21 +6909,15 @@ H5C__make_space_in_cache(H5F_t *f, hid_t dxpl_id, size_t space_needed,
HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size));
if ( write_permitted ) {
-
restart_scan = FALSE;
initial_list_len = cache_ptr->LRU_list_len;
entry_ptr = cache_ptr->LRU_tail_ptr;
- if ( cache_ptr->index_size >= cache_ptr->max_cache_size ) {
-
+ if(cache_ptr->index_size >= cache_ptr->max_cache_size)
empty_space = 0;
-
- } else {
-
+ else
empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
- }
-
while ( ( ( (cache_ptr->index_size + space_needed)
>
cache_ptr->max_cache_size
@@ -8064,6 +7974,515 @@ H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t * entry,
/*-------------------------------------------------------------------------
+ * Function: H5C__serialize_cache
+ *
+ * Purpose: Serialize (i.e. construct an on disk image) for all entries
+ * in the metadata cache including clean entries.
+ *
+ * Note that flush dependencies and "flush me last" flags
+ * must be observed in the serialization process.
+ *
+ * Note also that entries may be loaded, flushed, evicted,
+ * expunged, relocated, resized, or removed from the cache
+ * during this process, just as these actions may occur during
+ * a regular flush.
+ *
+ * However, we are given that the cache will contain no protected
+ * entries on entry to this routine (although entries may be
+ * briefly protected and then unprotected during the serialize
+ * process).
+ *
+ * The objective of this routine is serialize all entries and
+ * to force all entries into their actual locations on disk.
+ *
+ * The initial need for this routine is to settle all entries
+ * in the cache prior to construction of the metadata cache
+ * image so that the size of the cache image can be calculated.
+ * However, I gather that other uses for the routine are
+ * under consideration.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 7/22/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__serialize_cache(H5F_t *f, hid_t dxpl_id)
+{
+#if H5C_DO_SANITY_CHECKS
+ int i;
+ uint32_t index_len = 0;
+ size_t index_size = (size_t)0;
+ size_t clean_index_size = (size_t)0;
+ size_t dirty_index_size = (size_t)0;
+ size_t slist_size = (size_t)0;
+ uint32_t slist_len = 0;
+#endif /* H5C_DO_SANITY_CHECKS */
+ H5C_ring_t ring;
+ H5C_t * cache_ptr;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
+
+#if H5C_DO_SANITY_CHECKS
+ HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+
+ for(i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
+ index_len += cache_ptr->index_ring_len[i];
+ index_size += cache_ptr->index_ring_size[i];
+ clean_index_size += cache_ptr->clean_index_ring_size[i];
+ dirty_index_size += cache_ptr->dirty_index_ring_size[i];
+
+ slist_len += cache_ptr->slist_ring_len[i];
+ slist_size += cache_ptr->slist_ring_size[i];
+ } /* end for */
+
+ HDassert(cache_ptr->index_len == index_len);
+ HDassert(cache_ptr->index_size == index_size);
+ HDassert(cache_ptr->clean_index_size == clean_index_size);
+ HDassert(cache_ptr->dirty_index_size == dirty_index_size);
+ HDassert(cache_ptr->slist_len == slist_len);
+ HDassert(cache_ptr->slist_size == slist_size);
+#endif /* H5C_DO_SANITY_CHECKS */
+
+#if H5C_DO_EXTREME_SANITY_CHECKS
+ if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_lru_list(cache_ptr) < 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+#ifndef NDEBUG
+ /* if this is a debug build, set the serialization_count field of
+ * each entry in the cache to zero before we start the serialization.
+ * This allows us to detect the case in which any entry is serialized
+ * more than once (a performance issue), and more importantly, the
+ * case in which any flush dependency parent is serialized more than
+ * once (a correctness issue).
+ */
+ {
+ H5C_cache_entry_t * scan_ptr = NULL;
+
+ scan_ptr = cache_ptr->il_head;
+ while(scan_ptr != NULL) {
+ HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ scan_ptr->serialization_count = 0;
+ scan_ptr = scan_ptr->il_next;
+ } /* end while */
+ } /* end block */
+#endif /* NDEBUG */
+
+ /* set cache_ptr->serialization_in_progress to TRUE, and back
+ * to FALSE at the end of the function. Must maintain this flag
+ * to support H5C_get_serialization_in_progress(), which is in
+ * turn required to support sanity checking in some cache
+ * clients.
+ */
+ HDassert(!cache_ptr->serialization_in_progress);
+ cache_ptr->serialization_in_progress = TRUE;
+
+ /* Serialize each ring, starting from the outermost ring and
+ * working inward.
+ */
+ ring = H5C_RING_USER;
+ while(ring < H5C_RING_NTYPES) {
+ HDassert(cache_ptr->close_warning_received);
+ switch(ring) {
+ case H5C_RING_USER:
+ break;
+
+ case H5C_RING_RDFSM:
+ /* Settle raw data FSM */
+ if(!cache_ptr->rdfsm_settled)
+ if(H5MF_settle_raw_data_fsm(f, dxpl_id, &cache_ptr->rdfsm_settled) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed")
+ break;
+
+ case H5C_RING_MDFSM:
+ /* Settle metadata FSM */
+ if(!cache_ptr->mdfsm_settled)
+ if(H5MF_settle_meta_data_fsm(f, dxpl_id, &cache_ptr->mdfsm_settled) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed")
+ break;
+
+ case H5C_RING_SBE:
+ case H5C_RING_SB:
+ break;
+
+ default:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown ring?!?!")
+ break;
+ } /* end switch */
+
+ if(H5C__serialize_ring(f, dxpl_id, ring) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "serialize ring failed")
+
+ ring++;
+ } /* end while */
+
+#ifndef NDEBUG
+ /* Verify that no entry has been serialized more than once.
+ * FD parents with multiple serializations should have been caught
+ * elsewhere, so no specific check for them here.
+ */
+ {
+ H5C_cache_entry_t * scan_ptr = NULL;
+
+ scan_ptr = cache_ptr->il_head;
+ while(scan_ptr != NULL) {
+ HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(scan_ptr->serialization_count <= 1);
+
+ scan_ptr = scan_ptr->il_next;
+ } /* end while */
+ } /* end block */
+#endif /* NDEBUG */
+
+done:
+ cache_ptr->serialization_in_progress = FALSE;
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__serialize_cache() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__serialize_ring
+ *
+ * Purpose: Serialize the entries contained in the specified cache and
+ * ring. All entries in rings outside the specified ring
+ * must have been serialized on entry.
+ *
+ * If the cache contains protected entries in the specified
+ * ring, the function will fail, as protected entries cannot
+ * be serialized. However all unprotected entries in the
+ * target ring should be serialized before the function
+ * returns failure.
+ *
+ * If flush dependencies appear in the target ring, the
+ * function makes repeated passes through the index list
+ * serializing entries in flush dependency order.
+ *
+ * All entries outside the H5C_RING_SBE are marked for
+ * inclusion in the cache image. Entries in H5C_RING_SBE
+ * and below are marked for exclusion from the image.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 9/11/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__serialize_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring)
+{
+ hbool_t done = FALSE;
+ H5C_t * cache_ptr;
+ H5C_cache_entry_t * entry_ptr;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(ring > H5C_RING_UNDEFINED);
+ HDassert(ring < H5C_RING_NTYPES);
+
+ HDassert(cache_ptr->serialization_in_progress);
+
+ /* The objective here is to serialize all entries in the cache ring
+ * in flush dependency order.
+ *
+ * The basic algorithm is to scan the cache index list looking for
+ * unserialized entries that are either not in a flush dependency
+ * relationship, or which have no unserialized children. Any such
+ * entry is serialized and its flush dependency parents (if any) are
+ * informed -- allowing them to decrement their unserialized child counts.
+ *
+ * However, this algorithm is complicated by the ability
+ * of client serialization callbacks to perform operations on
+ * on the cache which can result in the insertion, deletion,
+ * relocation, resize, dirty, flush, eviction, or removal (via the
+ * take ownership flag) of entries. Changes in the flush dependency
+ * structure are also possible.
+ *
+ * On the other hand, the algorithm is simplified by the fact that
+ * we are serializing, not flushing. Thus, as long as all entries
+ * are serialized correctly, it doesn't matter if we have to go back
+ * and serialize an entry a second time.
+ *
+ * These possible actions result in the following modifications to
+ * the basic algorithm:
+ *
+ * 1) In the event of an entry expunge, eviction or removal, we must
+ * restart the scan as it is possible that the next entry in our
+ * scan is no longer in the cache. Were we to examine this entry,
+ * we would be accessing deallocated memory.
+ *
+ * 2) A resize, dirty, or insertion of an entry may result in
+ * the increment of a flush dependency parent's dirty and/or
+ * unserialized child count. In the context of serializing
+ * the cache, this is a non-issue, as even if we have already
+ * serialized the parent, it will be marked dirty and its image
+ * marked out of date if appropriate when the child is serialized.
+ *
+ * However, this is a major issue for a flush, as were this to happen
+ * in a flush, it would violate the invariant that the flush dependency
+ * feature is intended to enforce. As the metadata cache has no
+ * control over the behavior of cache clients, it has no way of
+ * preventing this behaviour. However, it should detect it if at all
+ * possible.
+ *
+ * Do this by maintaining a count of the number of times each entry is
+ * serialized during a cache serialization. If any flush dependency
+ * parent is serialized more than once, throw an assertion failure.
+ *
+ * 3) An entry relocation will typically change the location of the
+ * entry in the index list. This shouldn't cause problems as we
+ * will scan the index list until we make a complete pass without
+ * finding anything to serialize -- making relocations of either
+ * the current or next entries irrelevant.
+ *
+ * Note that since a relocation may result in our skipping part of
+ * the index list, we must always do at least one more pass through
+ * the index list after an entry relocation.
+ *
+ * 4) Changes in the flush dependency structure are possible on
+ * entry insertion, load, expunge, evict, or remove. Destruction
+ * of a flush dependency has no effect, as it can only relax the
+ * flush dependencies. Creation of a flush dependency can create
+ * an unserialized child of a flush dependency parent where all
+ * flush dependency children were previously serialized. Should
+ * this child dirty the flush dependency parent when it is serialized,
+ * the parent will be re-serialized.
+ *
+ * Per the discussion of 2) above, this is a non-issue for cache
+ * serialization, and a major problem for cache flush. Using the
+ * same detection mechanism, throw an assertion failure if this
+ * condition appears.
+ *
+ * Observe that either eviction or removal of entries as a result of
+ * a serialization is not a problem as long as the flush dependency
+ * tree does not change beyond the removal of a leaf.
+ */
+ while(!done) {
+ /* Reset the counters so that we can detect insertions, loads,
+ * moves, and flush dependency height changes caused by the pre_serialize
+ * and serialize callbacks.
+ */
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+
+ done = TRUE; /* set to FALSE if any activity in inner loop */
+ entry_ptr = cache_ptr->il_head;
+ while(entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+
+ /* Verify that either the entry is already serialized, or
+ * that it is assigned to either the target or an inner
+ * ring.
+ */
+ HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
+
+ /* Skip flush me last entries or inner ring entries */
+ if(!entry_ptr->flush_me_last && entry_ptr->ring == ring) {
+
+ /* if we encounter an unserialized entry in the current
+ * ring that is not marked flush me last, we are not done.
+ */
+ if(!entry_ptr->image_up_to_date)
+ done = FALSE;
+
+ /* Serialize the entry if its image is not up to date
+ * and it has no unserialized flush dependency children.
+ */
+ if(!entry_ptr->image_up_to_date && entry_ptr->flush_dep_nunser_children == 0) {
+ HDassert(entry_ptr->serialization_count == 0);
+
+ /* Serialize the entry */
+ if(H5C__serialize_single_entry(f, dxpl_id, cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
+
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ HDassert(entry_ptr->serialization_count == 0);
+
+#ifndef NDEBUG
+ /* Increment serialization counter (to detect multiple serializations) */
+ entry_ptr->serialization_count++;
+#endif /* NDEBUG */
+ } /* end if */
+ } /* end if */
+
+ /* Check for the cache being perturbed during the entry serialize */
+ if((cache_ptr->entries_loaded_counter > 0) ||
+ (cache_ptr->entries_inserted_counter > 0) ||
+ (cache_ptr->entries_relocated_counter > 0)) {
+
+#if H5C_COLLECT_CACHE_STATS
+ H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr);
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ /* Reset the counters */
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+
+ /* Restart scan */
+ entry_ptr = cache_ptr->il_head;
+ } /* end if */
+ else
+ /* Advance to next entry */
+ entry_ptr = entry_ptr->il_next;
+ } /* while ( entry_ptr != NULL ) */
+ } /* while ( ! done ) */
+
+
+ /* Reset the counters so that we can detect insertions, loads,
+ * moves, and flush dependency height changes caused by the pre_serialize
+ * and serialize callbacks.
+ */
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+
+ /* At this point, all entries not marked "flush me last" and in
+ * the current ring or outside it should be serialized and have up
+ * to date images. Scan the index list again to serialize the
+ * "flush me last" entries (if they are in the current ring) and to
+ * verify that all other entries have up to date images.
+ */
+ entry_ptr = cache_ptr->il_head;
+ while(entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring > H5C_RING_UNDEFINED);
+ HDassert(entry_ptr->ring < H5C_RING_NTYPES);
+ HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
+
+ if(entry_ptr->ring == ring) {
+ if(entry_ptr->flush_me_last) {
+ if(!entry_ptr->image_up_to_date) {
+ HDassert(entry_ptr->serialization_count == 0);
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+
+ /* Serialize the entry */
+ if(H5C__serialize_single_entry(f, dxpl_id, cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
+
+ /* Check for the cache changing */
+ if((cache_ptr->entries_loaded_counter > 0) ||
+ (cache_ptr->entries_inserted_counter > 0) ||
+ (cache_ptr->entries_relocated_counter > 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flush_me_last entry serialization triggered restart")
+
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ HDassert(entry_ptr->serialization_count == 0);
+#ifndef NDEBUG
+ /* Increment serialization counter (to detect multiple serializations) */
+ entry_ptr->serialization_count++;
+#endif /* NDEBUG */
+ } /* end if */
+ } /* end if */
+ else {
+ HDassert(entry_ptr->image_up_to_date);
+ HDassert(entry_ptr->serialization_count <= 1);
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ } /* end else */
+ } /* if ( entry_ptr->ring == ring ) */
+
+ entry_ptr = entry_ptr->il_next;
+ } /* while ( entry_ptr != NULL ) */
+
+done:
+ HDassert(cache_ptr->serialization_in_progress);
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__serialize_ring() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__serialize_single_entry
+ *
+ * Purpose: Serialize the cache entry pointed to by the entry_ptr
+ * parameter.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer, 7/24/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__serialize_single_entry(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr,
+ H5C_cache_entry_t *entry_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(!entry_ptr->prefetched);
+ HDassert(!entry_ptr->image_up_to_date);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(!entry_ptr->is_protected);
+ HDassert(!entry_ptr->flush_in_progress);
+ HDassert(entry_ptr->type);
+
+ /* Set entry_ptr->flush_in_progress to TRUE so that the target entry
+ * will not be evicted out from under us. Must set it back to FALSE
+ * when we are done.
+ */
+ entry_ptr->flush_in_progress = TRUE;
+
+ /* Allocate buffer for the entry image if required. */
+ if(NULL == entry_ptr->image_ptr) {
+ HDassert(entry_ptr->size > 0);
+ if(NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)) )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
+#if H5C_DO_MEMORY_SANITY_CHECKS
+ HDmemcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+ } /* end if */
+
+ /* Generate image for entry */
+ if(H5C__generate_image(f, cache_ptr, entry_ptr, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "Can't generate image for cache entry")
+
+ /* Reset the flush_in_progress flag */
+ entry_ptr->flush_in_progress = FALSE;
+
+done:
+ HDassert((ret_value != SUCCEED) || (!entry_ptr->flush_in_progress));
+ HDassert((ret_value != SUCCEED) || (entry_ptr->image_up_to_date));
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__serialize_single_entry() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5C__generate_image
*
* Purpose: Serialize an entry and generate its image.
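
H5C__serialize_ring() above scans the index list and restarts whenever the load/insert/relocate counters show that a serialize callback perturbed the cache. Below is a much-simplified sketch of that restart-on-perturbation scan, using generic types; the real routine also repeats passes until no unserialized entry remains and handles rings and "flush me last" entries.

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct entry { struct entry *next; bool serialized; } entry;

    struct cache {
        entry   *head;
        unsigned loaded, inserted, relocated;  /* bumped by callbacks */
    };

    /* One pass over the index list. Bail out (so the caller restarts
     * from the head) as soon as a perturbation counter fires, since
     * the next pointer of the current entry may then be stale. */
    static bool serialize_pass(struct cache *c, int (*ser)(entry *))
    {
        c->loaded = c->inserted = c->relocated = 0;
        for (entry *e = c->head; e != NULL; e = e->next) {
            if (!e->serialized)
                (void)ser(e);          /* may load/insert/relocate */
            if (c->loaded || c->inserted || c->relocated)
                return false;          /* list may be stale: restart */
        }
        return true;                   /* clean, undisturbed pass */
    }

    /* Usage: while (!serialize_pass(&cache, my_serializer)) { } */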
diff --git a/src/H5Cdbg.c b/src/H5Cdbg.c
index 3dbe86c..eb5f123 100644
--- a/src/H5Cdbg.c
+++ b/src/H5Cdbg.c
@@ -54,10 +54,6 @@
/* Local Prototypes */
/********************/
-#if 0 /* debugging routines */
-herr_t H5C_dump_cache_skip_list(H5C_t *cache_ptr, char *calling_fcn);
-#endif /* debugging routines */
-
/*********************/
/* Package Variables */
@@ -195,7 +191,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-#if 0 /* debugging routine */
+#ifndef NDEBUG
herr_t
H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn)
{
@@ -204,7 +200,7 @@ H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn)
H5C_cache_entry_t * entry_ptr = NULL;
H5SL_node_t * node_ptr = NULL;
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_NOAPI_NOERR
HDassert(cache_ptr != NULL);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
@@ -258,10 +254,9 @@ H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn)
HDfprintf(stdout, "\n\n");
-done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_dump_cache_skip_list() */
-#endif /* debugging routine */
+#endif /* NDEBUG */
/*-------------------------------------------------------------------------
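
The H5Cdbg.c hunk above promotes the skip-list dump routine from a permanently disabled #if 0 block to #ifndef NDEBUG, so it is compiled into debug builds and dropped from release builds. The bare pattern, with a hypothetical routine:

    #include <stdio.h>

    /* Compiled into debug builds only; release builds (built with
     * -DNDEBUG) omit the routine entirely. */
    #ifndef NDEBUG
    void dump_state(const char *calling_fcn)
    {
        fprintf(stdout, "state dump requested by %s\n", calling_fcn);
    }
    #endif /* NDEBUG */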
@@ -655,7 +650,7 @@ H5C_stats(H5C_t * cache_ptr,
cache_ptr->prefix,
cache_ptr->images_created,
cache_ptr->images_loaded,
- (long long)cache_ptr->last_image_size);
+ cache_ptr->last_image_size);
HDfprintf(stdout,
"%s prefetches / dirty prefetches = %lld / %lld\n",
@@ -905,7 +900,7 @@ H5C_stats__reset(H5C_t H5_ATTR_UNUSED * cache_ptr)
cache_ptr->images_created = 0;
cache_ptr->images_loaded = 0;
- cache_ptr->last_image_size = (size_t)0;
+ cache_ptr->last_image_size = (hsize_t)0;
cache_ptr->prefetches = 0;
cache_ptr->dirty_prefetches = 0;
@@ -1325,11 +1320,12 @@ H5C_cache_is_clean(const H5C_t *cache_ptr, H5C_ring_t inner_ring)
while(ring <= inner_ring) {
if(cache_ptr->dirty_index_ring_size[ring] > 0)
- ret_value = FALSE;
+ HGOTO_DONE(FALSE)
ring++;
} /* end while */
+done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_cache_is_clean() */
#endif /* NDEBUG */
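
The H5C_cache_is_clean() change above uses HGOTO_DONE to jump out of the ring loop at the first dirty ring instead of continuing to scan. A generic C rendering of that early-exit macro pattern (names hypothetical):

    #include <stdbool.h>
    #include <stddef.h>

    #define GOTO_DONE(val) do { ret_value = (val); goto done; } while (0)

    static bool rings_are_clean(const size_t *dirty_size, int n_rings)
    {
        bool ret_value = true;
        int  i;

        for (i = 0; i < n_rings; i++)
            if (dirty_size[i] > 0)
                GOTO_DONE(false);   /* no need to scan further rings */

    done:
        return ret_value;
    }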
diff --git a/src/H5Cimage.c b/src/H5Cimage.c
index a505f5c..1da2545 100644
--- a/src/H5Cimage.c
+++ b/src/H5Cimage.c
@@ -83,7 +83,7 @@
#define H5C_IMAGE_ENTRY_T_BAD_MAGIC 0xBeefDead
/* Maximum ring allowed in image */
-#define H5C_MAX_RING_IN_IMAGE 3
+#define H5C_MAX_RING_IN_IMAGE H5C_RING_MDFSM
/******************/
@@ -120,11 +120,6 @@ static herr_t H5C__reconstruct_cache_contents(H5F_t *f, hid_t dxpl_id,
H5C_t *cache_ptr);
static H5C_cache_entry_t *H5C__reconstruct_cache_entry(const H5F_t *f,
H5C_t *cache_ptr, const uint8_t **buf);
-static herr_t H5C__serialize_cache(H5F_t *f, hid_t dxpl_id);
-static herr_t H5C__serialize_ring(H5F_t *f, hid_t dxpl_id,
- H5C_ring_t ring);
-static herr_t H5C__serialize_single_entry(H5F_t *f, hid_t dxpl_id,
- H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr);
static herr_t H5C__write_cache_image_superblock_msg(H5F_t *f, hid_t dxpl_id,
hbool_t create);
static herr_t H5C__read_cache_image(H5F_t * f, hid_t dxpl_id, const H5C_t *cache_ptr);
@@ -1257,186 +1252,199 @@ H5C__prep_image_for_file_close(H5F_t *f, hid_t dxpl_id)
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- /* Create the cache image super block extension message.
- *
- * Note that the base address and length of the metadata cache
- * image are undefined at this point, and thus will have to be
- * updated later.
- *
- * Create the super block extension message now so that space
- * is allocated for it (if necessary) before we allocate space
- * for the cache image block.
- *
- * To simplify testing, do this only if the
- * H5C_CI__GEN_MDCI_SBE_MESG bit is set in
- * cache_ptr->image_ctl.flags.
+ /* If the file is opened and closed without any access to
+ * any group or dataset, it is possible that the cache image (if
+ * it exists) has not been read yet. Do this now if required.
*/
- if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDCI_SBE_MESG)
- if(H5C__write_cache_image_superblock_msg(f, dxpl_id, TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "creation of cache image SB mesg failed.")
+ if(cache_ptr->load_image) {
+ cache_ptr->load_image = FALSE;
+ if(H5C__load_cache_image(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "can't load cache image")
+ } /* end if */
+
+ /* Generate the cache image, if requested */
+ if(cache_ptr->image_ctl.generate_image) {
+ /* Create the cache image super block extension message.
+ *
+ * Note that the base address and length of the metadata cache
+ * image are undefined at this point, and thus will have to be
+ * updated later.
+ *
+ * Create the super block extension message now so that space
+ * is allocated for it (if necessary) before we allocate space
+ * for the cache image block.
+ *
+ * To simplify testing, do this only if the
+ * H5C_CI__GEN_MDCI_SBE_MESG bit is set in
+ * cache_ptr->image_ctl.flags.
+ */
+ if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDCI_SBE_MESG)
+ if(H5C__write_cache_image_superblock_msg(f, dxpl_id, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "creation of cache image SB mesg failed.")
- /* Serialize the cache */
- if(H5C__serialize_cache(f, dxpl_id) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "serialization of the cache failed")
+ /* Serialize the cache */
+ if(H5C__serialize_cache(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "serialization of the cache failed")
- /* Scan the cache and record data needed to construct the
- * cache image. In particular, for each entry we must record:
- *
- * 1) rank in LRU (if entry is in LRU)
- *
- * 2) Whether the entry is dirty prior to flush of
- * cache just prior to close.
- *
- * 3) Addresses of flush dependency parents (if any).
- *
- * 4) Number of flush dependency children (if any).
- *
- * In passing, also compute the size of the metadata cache
- * image. With the recent modifications of the free space
- * manager code, this size should be correct.
- */
- if(H5C__prep_for_file_close__scan_entries(f, cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C__prep_for_file_close__scan_entries failed")
- HDassert(HADDR_UNDEF == cache_ptr->image_addr);
+ /* Scan the cache and record data needed to construct the
+ * cache image. In particular, for each entry we must record:
+ *
+ * 1) rank in LRU (if entry is in LRU)
+ *
+ * 2) Whether the entry is dirty prior to flush of
+ * cache just prior to close.
+ *
+ * 3) Addresses of flush dependency parents (if any).
+ *
+ * 4) Number of flush dependency children (if any).
+ *
+ * In passing, also compute the size of the metadata cache
+ * image. With the recent modifications of the free space
+ * manager code, this size should be correct.
+ */
+ if(H5C__prep_for_file_close__scan_entries(f, cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C__prep_for_file_close__scan_entries failed")
+ HDassert(HADDR_UNDEF == cache_ptr->image_addr);
#ifdef H5_HAVE_PARALLEL
- /* In the parallel case, overwrite the image_len with the
- * value computed by process 0.
- */
- if(cache_ptr->aux_ptr) { /* we have multiple processes */
- int mpi_result;
- unsigned p0_image_len;
- H5AC_aux_t * aux_ptr;
+ /* In the parallel case, overwrite the image_len with the
+ * value computed by process 0.
+ */
+ if(cache_ptr->aux_ptr) { /* we have multiple processes */
+ int mpi_result;
+ unsigned p0_image_len;
+ H5AC_aux_t * aux_ptr;
- aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr;
- if(aux_ptr->mpi_rank == 0) {
- aux_ptr->p0_image_len = (unsigned)cache_ptr->image_data_len;
- p0_image_len = aux_ptr->p0_image_len;
+ aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr;
+ if(aux_ptr->mpi_rank == 0) {
+ aux_ptr->p0_image_len = (unsigned)cache_ptr->image_data_len;
+ p0_image_len = aux_ptr->p0_image_len;
- if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&p0_image_len, 1, MPI_UNSIGNED, 0, aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&p0_image_len, 1, MPI_UNSIGNED, 0, aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
- HDassert(p0_image_len == aux_ptr->p0_image_len);
+ HDassert(p0_image_len == aux_ptr->p0_image_len);
+ } /* end if */
+ else {
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&p0_image_len, 1, MPI_UNSIGNED, 0, aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
+
+ aux_ptr->p0_image_len = p0_image_len;
+ } /* end else */
+
+ /* Allocate space for a cache image of size equal to that
+ * computed by the process 0. This may be different from
+ * cache_ptr->image_data_len if mpi_rank != 0. However, since
+ * cache image write is suppressed on all processes other than
+ * process 0, this doesn't matter.
+ *
+ * Note that we allocate the cache image directly from the file
+ * driver so as to avoid unsettling the free space managers.
+ */
+ if(HADDR_UNDEF == (cache_ptr->image_addr = H5FD_alloc(f->shared->lf, dxpl_id, H5FD_MEM_SUPER, f,
+ (hsize_t)p0_image_len, &eoa_frag_addr, &eoa_frag_size)))
+ HGOTO_ERROR(H5E_CACHE, H5E_NOSPACE, FAIL, "can't allocate file space for metadata cache image")
} /* end if */
- else {
- if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&p0_image_len, 1, MPI_UNSIGNED, 0, aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
-
- aux_ptr->p0_image_len = p0_image_len;
- } /* end else */
+ else
+#endif /* H5_HAVE_PARALLEL */
+ /* Allocate the cache image block. Note that we allocate
+ * this space directly from the file driver so as to avoid
+ * unsettling the free space managers.
+ */
+ if(HADDR_UNDEF == (cache_ptr->image_addr = H5FD_alloc(f->shared->lf, dxpl_id, H5FD_MEM_SUPER, f,
+ (hsize_t)(cache_ptr->image_data_len), &eoa_frag_addr, &eoa_frag_size)))
+ HGOTO_ERROR(H5E_CACHE, H5E_NOSPACE, FAIL, "can't allocate file space for metadata cache image")
- /* Allocate space for a cache image of size equal to that
- * computed by the process 0. This may be different from
- * cache_ptr->image_data_len if mpi_rank != 0. However, since
- * cache image write is suppressed on all processes other than
- * process 0, this doesn't matter.
+ /* For now, drop any fragment left over from the allocation of the
+ * image block on the ground. A fragment should only be returned
+ * if the underlying file alignment is greater than 1.
+ *
+ * Clean this up eventually by extending the size of the cache
+ * image block to the next alignment boundary, and then setting
+ * the image_data_len to the actual size of the cache_image.
*
- * Note that we allocate the cache image directly from the file
- * driver so as to avoid unsettling the free space managers.
+ * On the off chance that there is some other way to get
+ * a fragment on a cache image allocation, leave the following
+ * assertion in the code so we will find out.
*/
- if(HADDR_UNDEF == (cache_ptr->image_addr = H5FD_alloc(f->shared->lf, dxpl_id, H5FD_MEM_SUPER, f,
- (hsize_t)p0_image_len, &eoa_frag_addr, &eoa_frag_size)))
- HGOTO_ERROR(H5E_CACHE, H5E_NOSPACE, FAIL, "can't allocate file space for metadata cache image")
- } /* end if */
- else
-#endif /* H5_HAVE_PARALLEL */
- /* Allocate the cache image block. Note that we allocate this
- * this space directly from the file driver so as to avoid
- * unsettling the free space managers.
+ HDassert((eoa_frag_size == 0) || (f->shared->alignment != 1));
+
+ /* Eventually it will be possible for the length of the cache image
+ * block on file to be greater than the size of the data it
+ * contains. However, for now they must be the same. Set
+ * cache_ptr->image_len accordingly.
*/
- if(HADDR_UNDEF == (cache_ptr->image_addr = H5FD_alloc(f->shared->lf, dxpl_id, H5FD_MEM_SUPER, f,
- (hsize_t)(cache_ptr->image_data_len), &eoa_frag_addr, &eoa_frag_size)))
- HGOTO_ERROR(H5E_CACHE, H5E_NOSPACE, FAIL, "can't allocate file space for metadata cache image")
+ cache_ptr->image_len = cache_ptr->image_data_len;
- /* For now, drop any fragment left over from the allocation of the
- * image block on the ground. A fragment should only be returned
- * if the underlying file alignment is greater than 1.
- *
- * Clean this up eventually by extending the size of the cache
- * image block to the next alignement boundary, and then setting
- * the image_data_len to the actual size of the cache_image.
- *
- * On the off chance that there is some other way to get a
- * a fragment on a cache image allocation, leave the following
- * assertion in the code so we will find out.
- */
- HDassert((eoa_frag_size == 0) || (f->shared->alignment != 1));
+ /* update the metadata cache image superblock extension
+ * message with the new cache image block base address and
+ * length.
+ *
+ * to simplify testing, do this only if the
+ * H5C_CI__GEN_MDC_IMAGE_BLK bit is set in
+ * cache_ptr->image_ctl.flags.
+ */
+ if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDC_IMAGE_BLK)
+ if(H5C__write_cache_image_superblock_msg(f, dxpl_id, FALSE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "update of cache image SB mesg failed.")
- /* Eventually it will be possible for the length of the cache image
- * block on file to be greater than the size of the data it
- * contains. However, for now they must be the same. Set
- * cache_ptr->image_len accordingly.
- */
- cache_ptr->image_len = cache_ptr->image_data_len;
+ /* At this point:
+ *
+ * 1) space in the file for the metadata cache image
+ * is allocated,
+ *
+ * 2) the metadata cache image superblock extension
+ * message exists and (if so configured) contains
+ * the correct data,
+ *
+ * 3) All entries in the cache that will appear in the
+ * cache image are serialized with up to date images.
+ *
+ * Since we just updated the cache image message,
+ * the super block extension message is dirty. However,
+ * since the superblock and the superblock extension
+ * can't be included in the cache image, this is a non-
+ * issue.
+ *
+ * 4) All entries in the cache that will be included in
+ * the cache image are marked as such, and we have a count
+ * of same.
+ *
+ * 5) Flush dependency heights are calculated for all
+ * entries that will be included in the cache image.
+ *
+ * If there are any entries to be included in the metadata cache
+ * image, allocate, populate, and sort the image_entries array.
+ *
+ * If the metadata cache image will be empty, delete the
+ * metadata cache image superblock extension message, set
+ * cache_ptr->image_ctl.generate_image to FALSE. This will
+ * allow the file close to continue normally without the
+ * unnecessary generation of the metadata cache image.
+ */
+ if(cache_ptr->num_entries_in_image > 0) {
+ if(H5C__prep_for_file_close__setup_image_entries_array(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINIT, FAIL, "can't setup image entries array.")
- /* update the metadata cache image superblock extension
- * message with the new cache image block base address and
- * length.
- *
- * to simplify testing, do this only if the
- * H5C_CI__GEN_MDC_IMAGE_BLK bit is set in
- * cache_ptr->image_ctl.flags.
- */
- if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDC_IMAGE_BLK)
- if(H5C__write_cache_image_superblock_msg(f, dxpl_id, FALSE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "update of cache image SB mesg failed.")
+ /* Sort the entries */
+ HDqsort(cache_ptr->image_entries, (size_t)cache_ptr->num_entries_in_image,
+ sizeof(H5C_image_entry_t), H5C__image_entry_cmp);
+ } /* end if */
+ else { /* cancel creation of metadata cache image */
+ HDassert(cache_ptr->image_entries == NULL);
- /* At this point:
- *
- * 1) space in the file for the metadata cache image
- * is allocated,
- *
- * 2) the metadata cache image superblock extension
- * message exists and (if so configured) contains
- * the correct data,
- *
- * 3) All entries in the cache that will appear in the
- * cache image are serialized with up to date images.
- *
- * Since we just updated the cache image message,
- * the super block extension message is dirty. However,
- * since the superblock and the superblock extension
- * can't be included in the cache image, this is a non-
- * issue.
- *
- * 4) All entries in the cache that will be include in
- * the cache are marked as such, and we have a count
- * of same.
- *
- * 5) Flush dependency heights are calculated for all
- * entries that will be included in the cache image.
- *
- * If there are any entries to be included in the metadata cache
- * image, allocate, populate, and sort the image_entries array.
- *
- * If the metadata cache image will be empty, delete the
- * metadata cache image superblock extension message, set
- * cache_ptr->image_ctl.generate_image to FALSE. This will
- * allow the file close to continue normally without the
- * unecessary generation of the metadata cache image.
- */
- if(cache_ptr->num_entries_in_image > 0) {
- if(H5C__prep_for_file_close__setup_image_entries_array(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINIT, FAIL, "can't setup image entries array.")
+ /* To avoid breaking the control flow tests, only delete
+ * the mdci superblock extension message if the
+ * H5C_CI__GEN_MDC_IMAGE_BLK flag is set in
+ * cache_ptr->image_ctl.flags.
+ */
+ if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDC_IMAGE_BLK)
+ if(H5F_super_ext_remove_msg(f, dxpl_id, H5O_MDCI_MSG_ID) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove MDC image msg from superblock ext.")
- /* Sort the entries */
- HDqsort(cache_ptr->image_entries, (size_t)cache_ptr->num_entries_in_image,
- sizeof(H5C_image_entry_t), H5C__image_entry_cmp);
+ cache_ptr->image_ctl.generate_image = FALSE;
+ } /* end else */
} /* end if */
- else { /* cancel creation of metadata cache iamge */
- HDassert(cache_ptr->image_entries == NULL);
-
- /* To avoid breaking the control flow tests, only delete
- * the mdci superblock extension message if the
- * H5C_CI__GEN_MDC_IMAGE_BLK flag is set in
- * cache_ptr->image_ctl.flags.
- */
- if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDC_IMAGE_BLK)
- if(H5F_super_ext_remove_msg(f, dxpl_id, H5O_MDCI_MSG_ID) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove MDC image msg from superblock ext.")
-
- cache_ptr->image_ctl.generate_image = FALSE;
- } /* end else */
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -1706,7 +1714,7 @@ H5C__decode_cache_image_header(const H5F_t *f, H5C_t *cache_ptr,
actual_header_len = (size_t)(p - *buf);
expected_header_len = H5C__cache_image_block_header_size(f);
if(actual_header_len != expected_header_len)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad header image len.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad header image len")
/* Update buffer pointer */
*buf = p;
@@ -2560,24 +2568,15 @@ H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr)
HDassert(cache_ptr->image_entries == NULL);
/* Allocate and initialize image_entries array */
- if(NULL == (image_entries = (H5C_image_entry_t *)H5MM_malloc(sizeof(H5C_image_entry_t) * (size_t)(cache_ptr->num_entries_in_image + 1))))
+ if(NULL == (image_entries = (H5C_image_entry_t *)H5MM_calloc(sizeof(H5C_image_entry_t) * (size_t)(cache_ptr->num_entries_in_image + 1))))
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for image_entries")
+ /* Initialize (non-zero/NULL/FALSE) fields */
for(u = 0; u <= cache_ptr->num_entries_in_image; u++) {
image_entries[u].magic = H5C_IMAGE_ENTRY_T_MAGIC;
image_entries[u].addr = HADDR_UNDEF;
- image_entries[u].size = 0;
image_entries[u].ring = H5C_RING_UNDEFINED;
- image_entries[u].age = 0;
image_entries[u].type_id = -1;
- image_entries[u].lru_rank = 0;
- image_entries[u].is_dirty = FALSE;
- image_entries[u].image_fd_height = 0;
- image_entries[u].fd_parent_count = 0;
- image_entries[u].fd_parent_addrs = NULL;
- image_entries[u].fd_child_count = 0;
- image_entries[u].fd_dirty_child_count = 0;
- image_entries[u].image_ptr = NULL;
} /* end for */
/* Scan each entry on the index list and populate the image_entries array */
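
The hunk above replaces H5MM_malloc with H5MM_calloc so the follow-up loop only has to set the fields whose defaults are not all-bits-zero. The same idiom in portable C, with made-up field values:

    #include <stdlib.h>

    typedef struct image_entry {
        unsigned  magic;     /* non-zero sentinel */
        long long addr;      /* "undefined" is a non-zero encoding */
        int       type_id;   /* -1 means unset */
        /* every other field defaults to 0/NULL/FALSE */
    } image_entry;

    static image_entry *alloc_image_entries(size_t n)
    {
        /* calloc zeroes the whole block, so only the non-zero
         * defaults need explicit initialization below. The +1
         * mirrors the sentinel slot in the original code. */
        image_entry *v = calloc(n + 1, sizeof(*v));

        if (v != NULL)
            for (size_t u = 0; u <= n; u++) {
                v[u].magic   = 0xBEEFCAFEu;
                v[u].addr    = -1;
                v[u].type_id = -1;
            }
        return v;
    }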
@@ -3115,6 +3114,7 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__reconstruct_cache_contents() */
+
/*-------------------------------------------------------------------------
* Function: H5C__reconstruct_cache_entry()
*
@@ -3145,6 +3145,7 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr,
hbool_t is_fd_child = FALSE;
#endif /* NDEBUG */ /* only used in assertions */
const uint8_t * p;
+ hbool_t file_is_rw;
H5C_cache_entry_t *ret_value = NULL; /* Return value */
FUNC_ENTER_STATIC
@@ -3155,13 +3156,16 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr,
HDassert(cache_ptr->num_entries_in_image > 0);
HDassert(buf && *buf);
- /* Get pointer to buffer */
- p = *buf;
+ /* Key R/W access off of whether the image will be deleted */
+ file_is_rw = cache_ptr->delete_image;
/* Allocate space for the prefetched cache entry */
if(NULL == (pf_entry_ptr = H5FL_CALLOC(H5C_cache_entry_t)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for prefetched cache entry")
+ /* Get pointer to buffer */
+ p = *buf;
+
/* Decode type id */
pf_entry_ptr->prefetch_type_id = *p++;
@@ -3184,7 +3188,7 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr,
* extension message and the cache image block will not be removed.
* Hence no danger in this.
*/
- pf_entry_ptr->is_dirty = (is_dirty && cache_ptr->delete_image);
+ pf_entry_ptr->is_dirty = (is_dirty && file_is_rw);
/* Decode ring */
pf_entry_ptr->ring = *p++;
@@ -3252,15 +3256,12 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr,
HDmemcpy(pf_entry_ptr->image_ptr, p, pf_entry_ptr->size);
p += pf_entry_ptr->size;
-
/* Initialize the rest of the fields in the prefetched entry */
/* (Only need to set non-zero/NULL/FALSE fields, due to calloc() above) */
pf_entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
pf_entry_ptr->cache_ptr = cache_ptr;
pf_entry_ptr->image_up_to_date = TRUE;
pf_entry_ptr->type = H5AC_PREFETCHED_ENTRY;
-
- /* Initialize cache image related fields */
pf_entry_ptr->prefetched = TRUE;
/* Sanity checks */
@@ -3280,529 +3281,6 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5C__serialize_cache
- *
- * Purpose: Serialize (i.e. construct an on disk image) for all entries
- * in the metadata cache including clean entries.
- *
- * Note that flush dependencies and "flush me last" flags
- * must be observed in the serialization process.
- *
- * Note also that entries may be loaded, flushed, evicted,
- * expunged, relocated, resized, or removed from the cache
- * during this process, just as these actions may occur during
- * a regular flush.
- *
- * However, we are given that the cache will contain no protected
- * entries on entry to this routine (although entries may be
- * briefly protected and then unprotected during the serialize
- * process).
- *
- * The objective of this routine is to serialize all entries and
- * to force all entries into their actual locations on disk.
- *
- * The initial need for this routine is to settle all entries
- * in the cache prior to construction of the metadata cache
- * image so that the size of the cache image can be calculated.
- * However, I gather that other uses for the routine are
- * under consideration.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
- *
- * Programmer: John Mainzer
- * 7/22/15
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__serialize_cache(H5F_t *f, hid_t dxpl_id)
-{
-#if H5C_DO_SANITY_CHECKS
- int i;
- uint32_t index_len = 0;
- size_t index_size = (size_t)0;
- size_t clean_index_size = (size_t)0;
- size_t dirty_index_size = (size_t)0;
- size_t slist_size = (size_t)0;
- uint32_t slist_len = 0;
-#endif /* H5C_DO_SANITY_CHECKS */
- H5C_ring_t ring;
- H5C_t * cache_ptr;
- herr_t ret_value = SUCCEED;
-
- FUNC_ENTER_STATIC
-
- /* Sanity checks */
- HDassert(f);
- HDassert(f->shared);
- cache_ptr = f->shared->cache;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->slist_ptr);
-
-#if H5C_DO_SANITY_CHECKS
- HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
- HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
- HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
-
- for(i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
- index_len += cache_ptr->index_ring_len[i];
- index_size += cache_ptr->index_ring_size[i];
- clean_index_size += cache_ptr->clean_index_ring_size[i];
- dirty_index_size += cache_ptr->dirty_index_ring_size[i];
-
- slist_len += cache_ptr->slist_ring_len[i];
- slist_size += cache_ptr->slist_ring_size[i];
- } /* end for */
-
- HDassert(cache_ptr->index_len == index_len);
- HDassert(cache_ptr->index_size == index_size);
- HDassert(cache_ptr->clean_index_size == clean_index_size);
- HDassert(cache_ptr->dirty_index_size == dirty_index_size);
- HDassert(cache_ptr->slist_len == slist_len);
- HDassert(cache_ptr->slist_size == slist_size);
-#endif /* H5C_DO_SANITY_CHECKS */
-
-#if H5C_DO_EXTREME_SANITY_CHECKS
- if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
- (H5C_validate_lru_list(cache_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
-#ifndef NDEBUG
- /* if this is a debug build, set the serialization_count field of
- * each entry in the cache to zero before we start the serialization.
- * This allows us to detect the case in which any entry is serialized
- * more than once (a performance issue), and more importantly, the
- * case in which any flush dependency parent is serialized more than
- * once (a correctness issue).
- */
- {
- H5C_cache_entry_t * scan_ptr = NULL;
-
- scan_ptr = cache_ptr->il_head;
- while(scan_ptr != NULL) {
- HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- scan_ptr->serialization_count = 0;
- scan_ptr = scan_ptr->il_next;
- } /* end while */
- } /* end block */
-#endif /* NDEBUG */
-
- /* set cache_ptr->serialization_in_progress to TRUE, and back
- * to FALSE at the end of the function. Must maintain this flag
- * to support H5C_get_serialization_in_progress(), which is in
- * turn required to support sanity checking in some cache
- * clients.
- */
- HDassert(!cache_ptr->serialization_in_progress);
- cache_ptr->serialization_in_progress = TRUE;
-
- /* Serialize each ring, starting from the outermost ring and
- * working inward.
- */
- ring = H5C_RING_USER;
- while(ring < H5C_RING_NTYPES) {
- HDassert(cache_ptr->close_warning_received);
- switch(ring) {
- case H5C_RING_USER:
- break;
-
- case H5C_RING_RDFSM:
- if(!cache_ptr->rdfsm_settled) {
- hbool_t fsm_settled = FALSE; /* Whether the FSM was actually settled */
-
- /* Settle raw data FSM */
- if(H5MF_settle_raw_data_fsm(f, dxpl_id, &fsm_settled) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed")
-
- /* Only set the flag if the FSM was actually settled */
- if(fsm_settled)
- cache_ptr->rdfsm_settled = TRUE;
- } /* end if */
- break;
-
- case H5C_RING_MDFSM:
- if(!cache_ptr->mdfsm_settled) {
- hbool_t fsm_settled = FALSE; /* Whether the FSM was actually settled */
-
- /* Settle metadata FSM */
- if(H5MF_settle_meta_data_fsm(f, dxpl_id, &fsm_settled) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed")
-
- /* Only set the flag if the FSM was actually settled */
- if(fsm_settled)
- cache_ptr->mdfsm_settled = TRUE;
- } /* end if */
- break;
-
- case H5C_RING_SBE:
- case H5C_RING_SB:
- break;
-
- default:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown ring?!?!")
- break;
- } /* end switch */
-
- if(H5C__serialize_ring(f, dxpl_id, ring) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "serialize ring failed")
-
- ring++;
- } /* end while */
-
-#ifndef NDEBUG
- /* Verify that no entry has been serialized more than once.
- * FD parents with multiple serializations should have been caught
- * elsewhere, so no specific check for them here.
- */
- {
- H5C_cache_entry_t * scan_ptr = NULL;
-
- scan_ptr = cache_ptr->il_head;
- while(scan_ptr != NULL) {
- HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(scan_ptr->serialization_count <= 1);
-
- scan_ptr = scan_ptr->il_next;
- } /* end while */
- } /* end block */
-#endif /* NDEBUG */
-
-done:
- cache_ptr->serialization_in_progress = FALSE;
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__serialize_cache() */
-
-
-/*-------------------------------------------------------------------------
- * Function: H5C__serialize_ring
- *
- * Purpose: Serialize the entries contained in the specified cache and
- * ring. All entries in rings outside the specified ring
- * must have been serialized on entry.
- *
- * If the cache contains protected entries in the specified
- * ring, the function will fail, as protected entries cannot
- * be serialized. However all unprotected entries in the
- * target ring should be serialized before the function
- * returns failure.
- *
- * If flush dependencies appear in the target ring, the
- * function makes repeated passes through the index list
- * serializing entries in flush dependency order.
- *
- * All entries outside the H5C_RING_SBE are marked for
- * inclusion in the cache image. Entries in H5C_RING_SBE
- * and below are marked for exclusion from the image.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
- *
- * Programmer: John Mainzer
- * 9/11/15
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__serialize_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring)
-{
- hbool_t done = FALSE;
- H5C_t * cache_ptr;
- H5C_cache_entry_t * entry_ptr;
- herr_t ret_value = SUCCEED;
-
- FUNC_ENTER_STATIC
-
- /* Sanity checks */
- HDassert(f);
- HDassert(f->shared);
- cache_ptr = f->shared->cache;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(ring > H5C_RING_UNDEFINED);
- HDassert(ring < H5C_RING_NTYPES);
-
- HDassert(cache_ptr->serialization_in_progress);
-
- /* The objective here is to serialize all entries in the cache ring
- * in flush dependency order.
- *
- * The basic algorithm is to scan the cache index list looking for
- * unserialized entries that are either not in a flush dependency
- * relationship, or which have no unserialized children. Any such
- * entry is serialized and its flush dependency parents (if any) are
- * informed -- allowing them to decrement their unserialized child counts.
- *
- * However, this algorithm is complicated by the ability
- * of client serialization callbacks to perform operations
- * on the cache which can result in the insertion, deletion,
- * relocation, resize, dirty, flush, eviction, or removal (via the
- * take ownership flag) of entries. Changes in the flush dependency
- * structure are also possible.
- *
- * On the other hand, the algorithm is simplified by the fact that
- * we are serializing, not flushing. Thus, as long as all entries
- * are serialized correctly, it doesn't matter if we have to go back
- * and serialize an entry a second time.
- *
- * These possible actions result in the following modifications to
- * the basic algorithm:
- *
- * 1) In the event of an entry expunge, eviction or removal, we must
- * restart the scan as it is possible that the next entry in our
- * scan is no longer in the cache. Were we to examine this entry,
- * we would be accessing deallocated memory.
- *
- * 2) A resize, dirty, or insertion of an entry may result in
- * the increment of a flush dependency parent's dirty and/or
- * unserialized child count. In the context of serializing
- * the cache, this is a non-issue, as even if we have already
- * serialized the parent, it will be marked dirty and its image
- * marked out of date if appropriate when the child is serialized.
- *
- * However, this is a major issue for a flush, as were this to happen
- * in a flush, it would violate the invariant that the flush dependency
- * feature is intended to enforce. As the metadata cache has no
- * control over the behavior of cache clients, it has no way of
- * preventing this behaviour. However, it should detect it if at all
- * possible.
- *
- * Do this by maintaining a count of the number of times each entry is
- * serialized during a cache serialization. If any flush dependency
- * parent is serialized more than once, throw an assertion failure.
- *
- * 3) An entry relocation will typically change the location of the
- * entry in the index list. This shouldn't cause problems as we
- * will scan the index list until we make a complete pass without
- * finding anything to serialize -- making relocations of either
- * the current or next entries irrelevant.
- *
- * Note that since a relocation may result in our skipping part of
- * the index list, we must always do at least one more pass through
- * the index list after an entry relocation.
- *
- * 4) Changes in the flush dependency structure are possible on
- * entry insertion, load, expunge, evict, or remove. Destruction
- * of a flush dependency has no effect, as it can only relax the
- * flush dependencies. Creation of a flush dependency can create
- * an unserialized child of a flush dependency parent where all
- * flush dependency children were previously serialized. Should
- * this child dirty the flush dependency parent when it is serialized,
- * the parent will be re-serialized.
- *
- * Per the discussion of 2) above, this is a non issue for cache
- * serialization, and a major problem for cache flush. Using the
- * same detection mechanism, throw an assertion failure if this
- * condition appears.
- *
- * Observe that either eviction or removal of entries as a result of
- * a serialization is not a problem as long as the flush depencency
- * tree does not change beyond the removal of a leaf.
- */
- while(!done) {
- /* Reset the counters so that we can detect insertions, loads,
- * moves, and flush dependency height changes caused by the pre_serialize
- * and serialize callbacks.
- */
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
-
- done = TRUE; /* set to FALSE if any activity in inner loop */
- entry_ptr = cache_ptr->il_head;
- while(entry_ptr != NULL) {
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
-
- /* Verify that either the entry is already serialized, or
- * that it is assigned to either the target or an inner
- * ring.
- */
- HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
-
- /* Skip flush me last entries or inner ring entries */
- if(!entry_ptr->flush_me_last && entry_ptr->ring == ring) {
-
- /* if we encounter an unserialized entry in the current
- * ring that is not marked flush me last, we are not done.
- */
- if(!entry_ptr->image_up_to_date)
- done = FALSE;
-
- /* Serialize the entry if its image is not up to date
- * and it has no unserialized flush dependency children.
- */
- if(!entry_ptr->image_up_to_date && entry_ptr->flush_dep_nunser_children == 0) {
- HDassert(entry_ptr->serialization_count == 0);
-
- /* Serialize the entry */
- if(H5C__serialize_single_entry(f, dxpl_id, cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
-
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
- HDassert(entry_ptr->serialization_count == 0);
-
-#ifndef NDEBUG
- /* Increment serialization counter (to detect multiple serializations) */
- entry_ptr->serialization_count++;
-#endif /* NDEBUG */
- } /* end if */
- } /* end if */
-
- /* Check for the cache being perturbed during the entry serialize */
- if((cache_ptr->entries_loaded_counter > 0) ||
- (cache_ptr->entries_inserted_counter > 0) ||
- (cache_ptr->entries_relocated_counter > 0)) {
-
-#if H5C_COLLECT_CACHE_STATS
- H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr);
-#endif /* H5C_COLLECT_CACHE_STATS */
-
- /* Reset the counters */
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
-
- /* Restart scan */
- entry_ptr = cache_ptr->il_head;
- } /* end if */
- else
- /* Advance to next entry */
- entry_ptr = entry_ptr->il_next;
- } /* while ( entry_ptr != NULL ) */
- } /* while ( ! done ) */
-
-
- /* Reset the counters so that we can detect insertions, loads,
- * moves, and flush dependency height changes caused by the pre_serialize
- * and serialize callbacks.
- */
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
-
- /* At this point, all entries not marked "flush me last" and in
- * the current ring or outside it should be serialized and have up
- * to date images. Scan the index list again to serialize the
- * "flush me last" entries (if they are in the current ring) and to
- * verify that all other entries have up to date images.
- */
- entry_ptr = cache_ptr->il_head;
- while(entry_ptr != NULL) {
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->ring > H5C_RING_UNDEFINED);
- HDassert(entry_ptr->ring < H5C_RING_NTYPES);
- HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
-
- if(entry_ptr->ring == ring) {
- if(entry_ptr->flush_me_last) {
- if(!entry_ptr->image_up_to_date) {
- HDassert(entry_ptr->serialization_count == 0);
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
-
- /* Serialize the entry */
- if(H5C__serialize_single_entry(f, dxpl_id, cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
-
- /* Check for the cache changing */
- if((cache_ptr->entries_loaded_counter > 0) ||
- (cache_ptr->entries_inserted_counter > 0) ||
- (cache_ptr->entries_relocated_counter > 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flush_me_last entry serialization triggered restart")
-
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
- HDassert(entry_ptr->serialization_count == 0);
-#ifndef NDEBUG
- /* Increment serialization counter (to detect multiple serializations) */
- entry_ptr->serialization_count++;
-#endif /* NDEBUG */
- } /* end if */
- } /* end if */
- else {
- HDassert(entry_ptr->image_up_to_date);
- HDassert(entry_ptr->serialization_count <= 1);
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
- } /* end else */
- } /* if ( entry_ptr->ring == ring ) */
-
- entry_ptr = entry_ptr->il_next;
- } /* while ( entry_ptr != NULL ) */
-
-done:
- HDassert(cache_ptr->serialization_in_progress);
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__serialize_ring() */
-
-
-/*-------------------------------------------------------------------------
- * Function: H5C__serialize_single_entry
- *
- * Purpose: Serialize the cache entry pointed to by the entry_ptr
- * parameter.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer, 7/24/15
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__serialize_single_entry(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr,
- H5C_cache_entry_t *entry_ptr)
-{
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_STATIC
-
- /* Sanity checks */
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(entry_ptr);
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(!entry_ptr->prefetched);
- HDassert(!entry_ptr->image_up_to_date);
- HDassert(entry_ptr->is_dirty);
- HDassert(!entry_ptr->is_protected);
- HDassert(!entry_ptr->flush_in_progress);
- HDassert(entry_ptr->type);
-
- /* Set entry_ptr->flush_in_progress to TRUE so that the target entry
- * will not be evicted out from under us. Must set it back to FALSE
- * when we are done.
- */
- entry_ptr->flush_in_progress = TRUE;
-
- /* Allocate buffer for the entry image if required. */
- if(NULL == entry_ptr->image_ptr) {
- HDassert(entry_ptr->size > 0);
- if(NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)) )
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
-#if H5C_DO_MEMORY_SANITY_CHECKS
- HDmemcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- } /* end if */
-
- /* Generate image for entry */
- if(H5C__generate_image(f, cache_ptr, entry_ptr, dxpl_id) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "Can't generate image for cache entry")
-
- /* Reset the flush_in_progress flag */
- entry_ptr->flush_in_progress = FALSE;
-
-done:
- HDassert((ret_value != SUCCEED) || (!entry_ptr->flush_in_progress));
- HDassert((ret_value != SUCCEED) || (entry_ptr->image_up_to_date));
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__serialize_single_entry() */
-
-
-/*-------------------------------------------------------------------------
* Function: H5C__write_cache_image_superblock_msg
*
* Purpose: Write the cache image superblock extension message,
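
Note on the large deletion above: the H5C__serialize_cache() prototype newly exported from H5Cpkg.h further down suggests these routines are relocated (presumably into H5C.c) rather than dropped. For reference, the ring-ordered traversal their comments describe boils down to the skeleton below; the enum and function names are illustrative stand-ins, not HDF5 API:

    #include <stdio.h>

    /* Rings are serialized outermost to innermost, with the free space
     * manager (FSM) rings settled just before they are serialized --
     * mirroring the switch statement in the removed loop.
     */
    enum ring {
        RING_USER,    /* ordinary metadata             */
        RING_RDFSM,   /* raw data free space managers  */
        RING_MDFSM,   /* metadata free space managers  */
        RING_SBE,     /* superblock extension          */
        RING_SB,      /* superblock                    */
        RING_NTYPES
    };

    static int settle_fsm(enum ring r)     { printf("settle ring %d\n", (int)r); return 0; }
    static int serialize_ring(enum ring r) { printf("serialize ring %d\n", (int)r); return 0; }

    int main(void)
    {
        for(int r = RING_USER; r < RING_NTYPES; r++) {
            /* FSM rings get settled first so their on-disk size is final */
            if(r == RING_RDFSM || r == RING_MDFSM)
                if(settle_fsm((enum ring)r) < 0)
                    return 1;
            if(serialize_ring((enum ring)r) < 0)
                return 1;
        }
        return 0;
    }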
@@ -3903,7 +3381,7 @@ H5C__write_cache_image(H5F_t *f, hid_t dxpl_id, const H5C_t *cache_ptr)
/* Write the buffer (if serial access, or rank 0 for parallel access) */
if(H5F_block_write(f, H5FD_MEM_SUPER, cache_ptr->image_addr, cache_ptr->image_len, dxpl_id, cache_ptr->image_buffer) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write metadata cache image block to file.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't write metadata cache image block to file")
#ifdef H5_HAVE_PARALLEL
} /* end if */
} /* end block */
diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c
index 7df8093..ebb98b3 100644
--- a/src/H5Cmpio.c
+++ b/src/H5Cmpio.c
@@ -180,19 +180,19 @@ H5C_apply_candidate_list(H5F_t * f,
int i;
int m;
int n;
- int first_entry_to_flush;
- int last_entry_to_flush;
- int entries_to_clear = 0;
- int entries_to_flush = 0;
- int entries_to_flush_or_clear_last = 0;
- int entries_to_flush_collectively = 0;
- int entries_cleared = 0;
- int entries_flushed = 0;
- int entries_delayed = 0;
- int entries_flushed_or_cleared_last = 0;
- int entries_flushed_collectively = 0;
- int entries_examined = 0;
- int initial_list_len;
+ unsigned first_entry_to_flush;
+ unsigned last_entry_to_flush;
+ unsigned entries_to_clear = 0;
+ unsigned entries_to_flush = 0;
+ unsigned entries_to_flush_or_clear_last = 0;
+ unsigned entries_to_flush_collectively = 0;
+ unsigned entries_cleared = 0;
+ unsigned entries_flushed = 0;
+ unsigned entries_delayed = 0;
+ unsigned entries_flushed_or_cleared_last = 0;
+ unsigned entries_flushed_collectively = 0;
+ unsigned entries_examined = 0;
+ unsigned initial_list_len;
int * candidate_assignment_table = NULL;
haddr_t addr;
H5C_cache_entry_t * clear_ptr = NULL;
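
The int-to-unsigned conversion of these counters is what drives the %d-to-%u format fixes in the debug output below, and it is the same change that lets the (len < 0) test disappear from the sanity-check macros in H5Cpkg.h. A standalone reminder of why the specifier must track the type (illustrative values only):

    #include <stdio.h>

    int main(void)
    {
        unsigned entries_to_flush = 3;
        unsigned entries_to_clear = 5;

        /* %u is the conversion that matches unsigned int; printing an
         * unsigned value whose magnitude exceeds INT_MAX through %d is
         * undefined behavior, so the specifiers must change with the type.
         */
        printf("to clear/to flush = %u/%u\n", entries_to_clear, entries_to_flush);

        /* An unsigned counter also makes a "< 0" sanity check vacuous,
         * which is why the macro drops that clause.
         */
        for(unsigned u = 0; u < entries_to_flush; u++)
            printf("flush candidate %u\n", u);
        return 0;
    }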
@@ -297,7 +297,7 @@ H5C_apply_candidate_list(H5F_t * f,
sprintf(&(tbl_buf[HDstrlen(tbl_buf)]), "\n");
HDfprintf(stdout, "%s", tbl_buf);
- HDfprintf(stdout, "%s:%d: flush entries [%d, %d].\n",
+ HDfprintf(stdout, "%s:%d: flush entries [%u, %u].\n",
FUNC, mpi_rank, first_entry_to_flush, last_entry_to_flush);
HDfprintf(stdout, "%s:%d: marking entries.\n", FUNC, mpi_rank);
@@ -329,19 +329,20 @@ H5C_apply_candidate_list(H5F_t * f,
* issue, we should be able to work around this.
*/
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry is protected?!?!?")
- /* Determine whether the entry is to be cleared or flushed,
- * and mark it accordingly. We will scan the protected and
- * pinned list shortly, and clear or flush according to these
- * markings.
- */
- if(i >= first_entry_to_flush && i <= last_entry_to_flush) {
- entries_to_flush++;
- entry_ptr->flush_immediately = TRUE;
- } /* end if */
- else {
- entries_to_clear++;
- entry_ptr->clear_on_unprotect = TRUE;
- } /* end else */
+
+ /* Determine whether the entry is to be cleared or flushed,
+ * and mark it accordingly. We will scan the protected and
+ * pinned list shortly, and clear or flush according to these
+ * markings.
+ */
+ if(u >= first_entry_to_flush && u <= last_entry_to_flush) {
+ entries_to_flush++;
+ entry_ptr->flush_immediately = TRUE;
+ } /* end if */
+ else {
+ entries_to_clear++;
+ entry_ptr->clear_on_unprotect = TRUE;
+ } /* end else */
/* Entries marked as collectively accessed and are in the
* candidate list to clear from the cache have to be
@@ -356,9 +357,9 @@ H5C_apply_candidate_list(H5F_t * f,
} /* end for */
#if H5C_APPLY_CANDIDATE_LIST__DEBUG
- HDfprintf(stdout, "%s:%d: num candidates/to clear/to flush = %u/%d/%d.\n",
- FUNC, mpi_rank, num_candidates, (int)entries_to_clear,
- (int)entries_to_flush);
+ HDfprintf(stdout, "%s:%d: num candidates/to clear/to flush = %u/%u/%u.\n",
+ FUNC, mpi_rank, num_candidates, entries_to_clear,
+ entries_to_flush);
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
/* We have now marked all the entries on the candidate list for
@@ -557,7 +558,7 @@ H5C_apply_candidate_list(H5F_t * f,
} /* end while */
#if H5C_APPLY_CANDIDATE_LIST__DEBUG
- HDfprintf(stdout, "%s:%d: entries examined/cleared/flushed = %d/%d/%d.\n",
+ HDfprintf(stdout, "%s:%d: entries examined/cleared/flushed = %u/%u/%u.\n",
FUNC, mpi_rank, entries_examined,
entries_cleared, entries_flushed);
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
@@ -684,7 +685,7 @@ H5C_apply_candidate_list(H5F_t * f,
#if H5C_APPLY_CANDIDATE_LIST__DEBUG
HDfprintf(stdout,
- "%s:%d: pel entries examined/cleared/flushed = %d/%d/%d.\n",
+ "%s:%d: pel entries examined/cleared/flushed = %u/%u/%u.\n",
FUNC, mpi_rank, entries_examined,
entries_cleared, entries_flushed);
HDfprintf(stdout, "%s:%d: done.\n", FUNC, mpi_rank);
@@ -745,7 +746,7 @@ H5C_apply_candidate_list(H5F_t * f,
(entries_cleared != entries_to_clear) ||
(entries_flushed_or_cleared_last != entries_to_flush_or_clear_last) ||
(entries_flushed_collectively != entries_to_flush_collectively))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry count mismatch.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry count mismatch")
done:
if(candidate_assignment_table != NULL)
@@ -992,23 +993,23 @@ done:
herr_t
H5C_mark_entries_as_clean(H5F_t * f,
hid_t dxpl_id,
- int32_t ce_array_len,
+ unsigned ce_array_len,
haddr_t * ce_array_ptr)
{
H5C_t * cache_ptr;
- int entries_cleared;
+ unsigned entries_cleared;
unsigned entries_examined;
- int i;
unsigned initial_list_len;
haddr_t addr;
#if H5C_DO_SANITY_CHECKS
- int pinned_entries_marked = 0;
- int protected_entries_marked = 0;
- int other_entries_marked = 0;
+ unsigned pinned_entries_marked = 0;
+ unsigned protected_entries_marked = 0;
+ unsigned other_entries_marked = 0;
haddr_t last_addr;
#endif /* H5C_DO_SANITY_CHECKS */
H5C_cache_entry_t * clear_ptr = NULL;
H5C_cache_entry_t * entry_ptr = NULL;
+ unsigned u;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -1029,11 +1030,11 @@ H5C_mark_entries_as_clean(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
- for(i = 0; i < ce_array_len; i++) {
- addr = ce_array_ptr[i];
+ for(u = 0; u < ce_array_len; u++) {
+ addr = ce_array_ptr[u];
#if H5C_DO_SANITY_CHECKS
- if(i == 0)
+ if(u == 0)
last_addr = addr;
else {
if(last_addr == addr)
@@ -1057,17 +1058,17 @@ H5C_mark_entries_as_clean(H5F_t * f,
if(entry_ptr == NULL) {
#if H5C_DO_SANITY_CHECKS
HDfprintf(stdout,
- "H5C_mark_entries_as_clean: entry[%d] = %ld not in cache.\n",
- (int)i,
- (long)addr);
+ "H5C_mark_entries_as_clean: entry[%u] = %a not in cache.\n",
+ u,
+ addr);
#endif /* H5C_DO_SANITY_CHECKS */
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry not in cache?!?!?")
} /* end if */
else if(!entry_ptr->is_dirty) {
#if H5C_DO_SANITY_CHECKS
HDfprintf(stdout,
- "H5C_mark_entries_as_clean: entry %ld is not dirty!?!\n",
- (long)addr);
+ "H5C_mark_entries_as_clean: entry %a is not dirty!?!\n",
+ addr);
#endif /* H5C_DO_SANITY_CHECKS */
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry not dirty?!?!?")
} /* end else-if */
@@ -1178,17 +1179,17 @@ H5C_mark_entries_as_clean(H5F_t * f,
( (ce_array_len - entries_cleared) <= cache_ptr->pl_len ) );
#if H5C_DO_SANITY_CHECKS
- i = 0;
+ u = 0;
entry_ptr = cache_ptr->pl_head_ptr;
while ( entry_ptr != NULL )
{
if ( entry_ptr->clear_on_unprotect ) {
- i++;
+ u++;
}
entry_ptr = entry_ptr->next;
}
- HDassert( (entries_cleared + i) == ce_array_len );
+ HDassert( (entries_cleared + u) == ce_array_len );
#endif /* H5C_DO_SANITY_CHECKS */
done:
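
The reworked messages above also switch the address from a (long) cast to the %a conversion, which HDF5's HDfprintf() provides for haddr_t (rendering undefined addresses readably). For plain C code without HDfprintf(), the equivalent is roughly the following; the 64-bit haddr_t and its sentinel are assumptions made for this sketch:

    #include <inttypes.h>
    #include <stdio.h>

    typedef uint64_t haddr_t;              /* assumed width for this sketch */
    #define HADDR_UNDEF ((haddr_t)-1)      /* assumed sentinel */

    static void report_missing(unsigned idx, haddr_t addr)
    {
        if(addr == HADDR_UNDEF)
            printf("entry[%u] = UNDEF not in cache.\n", idx);
        else
            printf("entry[%u] = %" PRIu64 " not in cache.\n", idx, addr);
    }

    int main(void)
    {
        report_missing(0, (haddr_t)4096);
        report_missing(1, HADDR_UNDEF);
        return 0;
    }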
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index 2474c3a..5b923e9 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -374,7 +374,6 @@ if ( ( (entry_ptr) == NULL ) || \
( ( ( (hd_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
( (hd_ptr) != (tail_ptr) ) \
) || \
- ( (len) < 0 ) || \
( ( (len) == 1 ) && \
( ( (hd_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \
( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (Size) ) \
@@ -482,7 +481,7 @@ if ( ( (hd_ptr) == NULL ) || \
) \
) \
) { \
- HDassert(0 && "il DLL pre remove SC failed"); \
+ HDassert(0 && "il DLL pre remove SC failed"); \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "il DLL pre remove SC failed") \
}
@@ -504,7 +503,7 @@ if ( ( (entry_ptr) == NULL ) || \
) \
) \
) { \
- HDassert(0 && "IL DLL pre insert SC failed"); \
+ HDassert(0 && "IL DLL pre insert SC failed"); \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "IL DLL pre insert SC failed") \
}
@@ -1591,7 +1590,7 @@ if ( ( (cache_ptr)->index_size != \
(cache_ptr)->slist_size ); \
\
if(H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) < 0) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), "Can't insert entry in skip list") \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), "can't insert entry in skip list") \
\
(entry_ptr)->in_slist = TRUE; \
(cache_ptr)->slist_changed = TRUE; \
@@ -1626,8 +1625,7 @@ if ( ( (cache_ptr)->index_size != \
(cache_ptr)->slist_size ); \
\
if(H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) < 0) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
- "Can't insert entry in skip list") \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), "can't insert entry in skip list") \
\
(entry_ptr)->in_slist = TRUE; \
(cache_ptr)->slist_changed = TRUE; \
@@ -1716,8 +1714,7 @@ if ( ( (cache_ptr)->index_size != \
\
if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \
!= (entry_ptr) ) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \
- "Can't delete entry from skip list.") \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "can't delete entry from skip list") \
\
HDassert( (cache_ptr)->slist_len > 0 ); \
if(!(during_flush)) \
@@ -2954,39 +2951,39 @@ if ( ( (cache_ptr)->index_size != \
(cache_ptr)->pel_tail_ptr, (cache_ptr)->pel_len, \
(cache_ptr)->pel_size, (fail_val)) \
\
- /* modified LRU specific code */ \
+ /* modified LRU specific code */ \
\
- /* insert the entry at the head of the LRU list. */ \
+ /* insert the entry at the head of the LRU list. */ \
\
- H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
- (cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
- (cache_ptr)->LRU_list_size, (fail_val)) \
+ H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
\
- /* Similarly, insert the entry at the head of either the clean \
- * or dirty LRU list as appropriate. \
- */ \
+ /* Similarly, insert the entry at the head of either the clean \
+ * or dirty LRU list as appropriate. \
+ */ \
\
- if ( (entry_ptr)->is_dirty ) { \
+ if ( (entry_ptr)->is_dirty ) { \
\
- H5C__AUX_DLL_PREPEND((entry_ptr), \
- (cache_ptr)->dLRU_head_ptr, \
- (cache_ptr)->dLRU_tail_ptr, \
- (cache_ptr)->dLRU_list_len, \
- (cache_ptr)->dLRU_list_size, \
- (fail_val)) \
+ H5C__AUX_DLL_PREPEND((entry_ptr), \
+ (cache_ptr)->dLRU_head_ptr, \
+ (cache_ptr)->dLRU_tail_ptr, \
+ (cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, \
+ (fail_val)) \
\
- } else { \
+ } else { \
\
- H5C__AUX_DLL_PREPEND((entry_ptr), \
- (cache_ptr)->cLRU_head_ptr, \
- (cache_ptr)->cLRU_tail_ptr, \
- (cache_ptr)->cLRU_list_len, \
- (cache_ptr)->cLRU_list_size, \
- (fail_val)) \
- } \
+ H5C__AUX_DLL_PREPEND((entry_ptr), \
+ (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_tail_ptr, \
+ (cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, \
+ (fail_val)) \
+ } \
\
- /* End modified LRU specific code. */ \
+ /* End modified LRU specific code. */ \
\
} /* H5C__UPDATE_RP_FOR_UNPIN */
@@ -3181,22 +3178,22 @@ if ( ( (hd_ptr) == NULL ) || \
( (len) <= 0 ) || \
( (Size) < (entry_ptr)->size ) || \
( ( (Size) == (entry_ptr)->size ) && ( ! ( (len) == 1 ) ) ) || \
- ( ( (entry_ptr)->coll_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) || \
+ ( ( (entry_ptr)->coll_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) || \
( ( (entry_ptr)->coll_next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \
( ( (len) == 1 ) && \
( ! ( ( (hd_ptr) == (entry_ptr) ) && ( (tail_ptr) == (entry_ptr) ) && \
- ( (entry_ptr)->coll_next == NULL ) && \
- ( (entry_ptr)->coll_prev == NULL ) && \
+ ( (entry_ptr)->coll_next == NULL ) && \
+ ( (entry_ptr)->coll_prev == NULL ) && \
( (Size) == (entry_ptr)->size ) \
) \
) \
) \
) { \
- HDassert(0 && "coll DLL pre remove SC failed"); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "coll DLL pre remove SC failed") \
+ HDassert(0 && "coll DLL pre remove SC failed"); \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "coll DLL pre remove SC failed") \
}
-#define H5C__COLL_DLL_SC(head_ptr, tail_ptr, len, Size, fv) \
+#define H5C__COLL_DLL_SC(head_ptr, tail_ptr, len, Size, fv) \
if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
( (head_ptr) != (tail_ptr) ) \
) || \
@@ -3208,36 +3205,35 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
) \
) || \
( ( (len) >= 1 ) && \
- ( ( (head_ptr) == NULL ) || ( (head_ptr)->coll_prev != NULL ) || \
- ( (tail_ptr) == NULL ) || ( (tail_ptr)->coll_next != NULL ) \
+ ( ( (head_ptr) == NULL ) || ( (head_ptr)->coll_prev != NULL ) || \
+ ( (tail_ptr) == NULL ) || ( (tail_ptr)->coll_next != NULL ) \
) \
) \
) { \
- HDassert(0 && "COLL DLL sanity check failed"); \
+ HDassert(0 && "COLL DLL sanity check failed"); \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "COLL DLL sanity check failed") \
}
#define H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \
if ( ( (entry_ptr) == NULL ) || \
- ( (entry_ptr)->coll_next != NULL ) || \
- ( (entry_ptr)->coll_prev != NULL ) || \
+ ( (entry_ptr)->coll_next != NULL ) || \
+ ( (entry_ptr)->coll_prev != NULL ) || \
( ( ( (hd_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
( (hd_ptr) != (tail_ptr) ) \
) || \
- ( (len) < 0 ) || \
( ( (len) == 1 ) && \
( ( (hd_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \
( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (Size) ) \
) \
) || \
( ( (len) >= 1 ) && \
- ( ( (hd_ptr) == NULL ) || ( (hd_ptr)->coll_prev != NULL ) || \
- ( (tail_ptr) == NULL ) || ( (tail_ptr)->coll_next != NULL ) \
+ ( ( (hd_ptr) == NULL ) || ( (hd_ptr)->coll_prev != NULL ) || \
+ ( (tail_ptr) == NULL ) || ( (tail_ptr)->coll_next != NULL ) \
) \
) \
) { \
- HDassert(0 && "COLL DLL pre insert SC failed"); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "COLL DLL pre insert SC failed") \
+ HDassert(0 && "COLL DLL pre insert SC failed"); \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "COLL DLL pre insert SC failed") \
}
#else /* H5C_DO_SANITY_CHECKS */
@@ -4294,8 +4290,8 @@ typedef struct H5C_tag_info_t {
* page buffering, this is no longer viable, as we must finalize the on
* disk image of all metadata much sooner.
*
- * This is handled by the H5FS_settle_raw_data_fsm() and
- * H5FS_settle_meta_data_fsm() routines. As these calls are expensive,
+ * This is handled by the H5MF_settle_raw_data_fsm() and
+ * H5MF_settle_meta_data_fsm() routines. As these calls are expensive,
* the following fields are used to track whether the target free space
* managers are clean.
*
@@ -4679,8 +4675,8 @@ struct H5C_t {
H5SL_t * slist_ptr;
uint32_t num_last_entries;
#if H5C_DO_SANITY_CHECKS
- int64_t slist_len_increase;
- int64_t slist_size_increase;
+ int32_t slist_len_increase;
+ ssize_t slist_size_increase;
#endif /* H5C_DO_SANITY_CHECKS */
/* Fields for maintaining list of tagged entries */
@@ -4876,7 +4872,7 @@ typedef int (*H5C_tag_iter_cb_t)(H5C_cache_entry_t *entry, void *ctx);
/* Package Private Prototypes */
/******************************/
H5_DLL herr_t H5C__prep_image_for_file_close(H5F_t *f, hid_t dxpl_id);
-H5_DLL herr_t H5C__deserialize_prefetched_entry(H5F_t * f, hid_t dxpl_id,
+H5_DLL herr_t H5C__deserialize_prefetched_entry(H5F_t *f, hid_t dxpl_id,
H5C_t * cache_ptr, H5C_cache_entry_t** entry_ptr_ptr,
const H5C_class_t * type, haddr_t addr, void * udata);
@@ -4887,11 +4883,12 @@ H5_DLL herr_t H5C__generate_cache_image(H5F_t *f, hid_t dxpl_id, H5C_t *cache_pt
H5_DLL herr_t H5C__load_cache_image(H5F_t *f, hid_t dxpl_id);
H5_DLL herr_t H5C__mark_flush_dep_serialized(H5C_cache_entry_t * entry_ptr);
H5_DLL herr_t H5C__mark_flush_dep_unserialized(H5C_cache_entry_t * entry_ptr);
-H5_DLL herr_t H5C__make_space_in_cache(H5F_t * f, hid_t dxpl_id,
+H5_DLL herr_t H5C__make_space_in_cache(H5F_t * f, hid_t dxpl_id,
size_t space_needed, hbool_t write_permitted);
H5_DLL herr_t H5C__flush_marked_entries(H5F_t * f, hid_t dxpl_id);
H5_DLL herr_t H5C__generate_image(H5F_t *f, H5C_t *cache_ptr,
H5C_cache_entry_t *entry_ptr, hid_t dxpl_id);
+H5_DLL herr_t H5C__serialize_cache(H5F_t *f, hid_t dxpl_id);
H5_DLL herr_t H5C__iter_tagged_entries(H5C_t *cache, haddr_t tag, hbool_t match_global,
H5C_tag_iter_cb_t cb, void *cb_ctx);
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index 9696cc0..28eacf2 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -2284,13 +2284,14 @@ H5_DLL herr_t H5C_apply_candidate_list(H5F_t *f, hid_t dxpl_id,
H5_DLL herr_t H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr);
H5_DLL herr_t H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr);
H5_DLL herr_t H5C_clear_coll_entries(H5C_t * cache_ptr, hbool_t partial);
-H5_DLL herr_t H5C_mark_entries_as_clean(H5F_t *f, hid_t dxpl_id, int32_t ce_array_len,
+H5_DLL herr_t H5C_mark_entries_as_clean(H5F_t *f, hid_t dxpl_id, unsigned ce_array_len,
haddr_t *ce_array_ptr);
#endif /* H5_HAVE_PARALLEL */
#ifndef NDEBUG /* debugging functions */
H5_DLL hbool_t H5C_get_serialization_in_progress(const H5C_t *cache_ptr);
H5_DLL hbool_t H5C_cache_is_clean(const H5C_t *cache_ptr, H5C_ring_t inner_ring);
+H5_DLL herr_t H5C_dump_cache_skip_list(H5C_t *cache_ptr, char *calling_fcn);
H5_DLL herr_t H5C_get_entry_ptr_from_addr(H5C_t *cache_ptr, haddr_t addr,
void **entry_ptr_ptr);
H5_DLL herr_t H5C_flush_dependency_exists(H5C_t *cache_ptr, haddr_t parent_addr,
diff --git a/src/H5Cquery.c b/src/H5Cquery.c
index 03aadb0..33a322d 100644
--- a/src/H5Cquery.c
+++ b/src/H5Cquery.c
@@ -444,7 +444,8 @@ H5C_get_entry_ring(const H5F_t *f, haddr_t addr, H5C_ring_t *ring)
/* Locate the entry at the address */
H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
- HDassert(entry_ptr);
+ if(entry_ptr == NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, "can't find entry in index")
/* Return the ring value */
*ring = entry_ptr->ring;
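
The H5Cquery.c change above is a hardening fix: HDassert() compiles away under NDEBUG, so a production build would have dereferenced a NULL entry_ptr; the explicit HGOTO_ERROR turns that into a recoverable failure. The pattern in miniature, without the HDF5 error machinery:

    #include <stdio.h>

    struct entry { int ring; };

    /* Returns 0 on success, -1 on failure (the herr_t convention). */
    static int get_entry_ring(const struct entry *entry_ptr, int *ring_out)
    {
        /* A bare assert() would vanish in release builds; an explicit
         * check turns a missing entry into an error the caller can see.
         */
        if(entry_ptr == NULL) {
            fprintf(stderr, "can't find entry in index\n");
            return -1;
        }
        *ring_out = entry_ptr->ring;
        return 0;
    }

    int main(void)
    {
        struct entry e = { 2 };
        int ring;
        if(get_entry_ring(&e, &ring) == 0)
            printf("ring = %d\n", ring);
        return 0;
    }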
diff --git a/src/H5Fint.c b/src/H5Fint.c
index ada0e96..d122357 100644
--- a/src/H5Fint.c
+++ b/src/H5Fint.c
@@ -833,7 +833,7 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
if((H5F_ACC_RDWR & H5F_INTENT(f)) && flush)
if(H5F__flush_phase1(f, dxpl_id) < 0)
/* Push error, but keep going*/
- HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")
+ HDONE_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush cached data (phase 1)")
/* Notify the metadata cache that the file is about to be closed.
* This allows the cache to set up for creating a metadata cache
@@ -849,8 +849,8 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
*/
if((H5F_ACC_RDWR & H5F_INTENT(f)) && flush)
if(H5F__flush_phase2(f, dxpl_id, TRUE) < 0)
- /* Push error, but keep going*/
- HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")
+ /* Push error, but keep going */
+ HDONE_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush cached data (phase 2)")
/* With the shutdown modifications, the contents of the metadata cache
* should be clean at this point, with the possible exception of the
@@ -1285,6 +1285,10 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
shared = file->shared;
lf = shared->lf;
+ /* Get the file access property list, for future queries */
+ if(NULL == (a_plist = (H5P_genplist_t *)H5I_object(fapl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not file access property list")
+
/*
* Read or write the file superblock, depending on whether the file is
* empty or not.
@@ -1318,10 +1322,6 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to read root group")
} /* end if */
- /* Get the file access property list, for future queries */
- if(NULL == (a_plist = (H5P_genplist_t *)H5I_object(fapl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not file access property list")
-
/*
* Decide the file close degree. If it's the first time to open the
* file, set the degree to access property list value; if it's the
@@ -1529,7 +1529,7 @@ H5F__flush_phase2(H5F_t *f, hid_t dxpl_id, hbool_t closing)
/* Flush file buffers to disk. */
if(H5FD_flush(f->shared->lf, dxpl_id, closing) < 0)
/* Push error, but keep going*/
- HDONE_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "low level flush failed")
+ HDONE_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "low level flush failed")
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5F__flush_phase2() */
diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c
index e203c38..3b86dae 100644
--- a/src/H5Fsuper.c
+++ b/src/H5Fsuper.c
@@ -240,7 +240,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-herr_t
+static herr_t
H5F__update_super_ext_driver_msg(H5F_t *f, hid_t dxpl_id)
{
H5F_super_t *sblock; /* Pointer to the super block */
@@ -861,6 +861,7 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
H5O_loc_t ext_loc; /* Superblock extension object location */
hbool_t need_ext; /* Whether the superblock extension is needed */
hbool_t ext_created = FALSE; /* Whether the extension has been created */
+ hbool_t non_default_fs_settings = FALSE; /* Whether the file has non-default free-space settings */
herr_t ret_value = SUCCEED; /* Return Value */
FUNC_ENTER_PACKAGE_TAG(dxpl_id, H5AC__SUPERBLOCK_TAG, FAIL)
@@ -887,6 +888,11 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
if(H5P_get(plist, H5F_CRT_BTREE_RANK_NAME, &sblock->btree_k[0]) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to get rank for btree internal nodes")
+ /* Check for non-default free-space settings */
+ if(!(f->shared->fs_strategy == H5F_FILE_SPACE_STRATEGY_DEF &&
+ f->shared->fs_threshold == H5F_FREE_SPACE_THRESHOLD_DEF))
+ non_default_fs_settings = TRUE;
+
/* Bump superblock version if latest superblock version support is enabled */
if(H5F_USE_LATEST_FLAGS(f, H5F_LATEST_SUPERBLOCK))
super_vers = HDF5_SUPERBLOCK_VERSION_LATEST;
@@ -896,8 +902,7 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
/* Bump superblock version to create superblock extension for
* non-default file space strategy or non-default free-space threshold
*/
- else if(f->shared->fs_strategy != H5F_FILE_SPACE_STRATEGY_DEF ||
- f->shared->fs_threshold != H5F_FREE_SPACE_THRESHOLD_DEF)
+ else if(non_default_fs_settings)
super_vers = HDF5_SUPERBLOCK_VERSION_2;
/* Check for non-default indexed storage B-tree internal 'K' value
* and set the version # of the superblock to 1 if it is a non-default
@@ -1014,8 +1019,7 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
need_ext = TRUE;
} /* end if */
/* Files with non-default free space settings always need the superblock extension */
- else if(f->shared->fs_strategy != H5F_FILE_SPACE_STRATEGY_DEF ||
- f->shared->fs_threshold != H5F_FREE_SPACE_THRESHOLD_DEF) {
+ else if(non_default_fs_settings) {
HDassert(super_vers >= HDF5_SUPERBLOCK_VERSION_2);
need_ext = TRUE;
} /* end if */
@@ -1103,9 +1107,8 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
f->shared->drvinfo_sb_msg_exists = TRUE;
} /* end if */
- /* Check for non-default free space settings */
- if(f->shared->fs_strategy != H5F_FILE_SPACE_STRATEGY_DEF ||
- f->shared->fs_threshold != H5F_FREE_SPACE_THRESHOLD_DEF) {
+ /* Check for non-default free-space info settings */
+ if(non_default_fs_settings) {
H5FD_mem_t type; /* Memory type for iteration */
H5O_fsinfo_t fsinfo; /* Free space manager info message */
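
The recurring theme in the H5Fsuper.c hunks is hoisting the repeated strategy/threshold comparison into the single non_default_fs_settings flag, so the version bump, the need_ext test, and the fsinfo write can never drift apart. The refactor in miniature (default values here are placeholders, not HDF5's):

    #include <stdbool.h>
    #include <stdio.h>

    #define FS_STRATEGY_DEF  0   /* placeholder defaults */
    #define FS_THRESHOLD_DEF 1

    int main(void)
    {
        int  fs_strategy  = 2;   /* pretend a non-default strategy was chosen */
        long fs_threshold = 1;

        /* Compute the predicate once... */
        bool non_default_fs_settings =
            !(fs_strategy == FS_STRATEGY_DEF && fs_threshold == FS_THRESHOLD_DEF);

        /* ...and reuse it at every decision point that used to repeat
         * the two-clause comparison.
         */
        if(non_default_fs_settings)
            printf("bump superblock version and create the extension\n");
        return 0;
    }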
diff --git a/src/H5HFcache.c b/src/H5HFcache.c
index 8e5ed76..ffdac9a 100644
--- a/src/H5HFcache.c
+++ b/src/H5HFcache.c
@@ -117,10 +117,10 @@ static herr_t H5HF__cache_dblock_free_icr(void *thing);
/* Debugging Function Prototypes */
#ifndef NDEBUG
static herr_t H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, hid_t dxpl_id,
- H5HF_hdr_t * hdr, hbool_t *fd_clean, hbool_t *clean);
+ H5HF_hdr_t *hdr, hbool_t *fd_clean, hbool_t *clean);
static herr_t H5HF__cache_verify_iblock_descendants_clean(H5F_t *f,
hid_t dxpl_id, haddr_t fd_parent_addr, H5HF_indirect_t *iblock,
- unsigned *iblock_status, hbool_t * fd_clean, hbool_t *clean);
+ unsigned *iblock_status, hbool_t *fd_clean, hbool_t *clean);
static herr_t H5HF__cache_verify_iblocks_dblocks_clean(H5F_t *f,
haddr_t fd_parent_addr, H5HF_indirect_t *iblock, hbool_t *fd_clean,
hbool_t *clean, hbool_t *has_dblocks);
diff --git a/src/H5MF.c b/src/H5MF.c
index 00d6b28..23f128f 100644
--- a/src/H5MF.c
+++ b/src/H5MF.c
@@ -1251,18 +1251,18 @@ done:
* Purpose: Handle any tasks required before the metadata cache
* can serialize or flush the raw data free space manager
* and any metadata free space managers that reside in the
- * raw data free space manager ring.
+ * raw data free space manager ring.
*
* Specifically, any metadata managers that DON'T handle
- * space allocation for free space manager header or section
- * info will reside in the raw data free space manager ring.
- * As of this writing, the plan is to move to only two free space
+ * space allocation for free space manager header or section
+ * info will reside in the raw data free space manager ring.
+ * As of this writing, the plan is to move to only two free space
* managers, one for raw data and one for metadata -- which
* means that only the raw data free space manager will reside
* in the free space manager ring. However, this has not been
* fully implemented yet, so this code must support the
 * possibility of multiple metadata free space managers, at most
- * two of which handle free space manager header or section info,
+ * two of which handle free space manager header or section info,
* and thus reside in the metadata free space manager ring.
*
* At present, the task list is:
@@ -1271,20 +1271,20 @@ done:
*
* a) Free both aggregators. Space not at EOA will be
* added to the appropriate free space manager.
- *
+ *
* The raw data aggregator should not be restarted
* after this point. It is possible that the metadata
* aggregator will be.
- *
+ *
* b) Free all file space currently allocated to free
- * space managers.
+ * space managers.
*
* c) Delete the free space manager superblock
* extension message if allocated.
*
* This done, reduce the EOA by moving it to just before
* the last piece of free memory in the file.
- *
+ *
* 2) Ensure that space is allocated for the free space
* manager superblock extension message. Must do this
* now, before reallocating file space for free space
@@ -1309,6 +1309,7 @@ done:
* We will allocate space for free space managers involved
* in the allocation of file space for free space managers
* in H5MF_settle_meta_data_fsm()
+ *
* Return: SUCCEED/FAIL
*
* Programmer: John Mainzer
@@ -1348,9 +1349,9 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
/* a) Free the space in aggregators:
*
- * (for space not at EOF, it may be put into free space managers)
+ * (for space not at EOF, it may be put into free space managers)
*
- * Do this now so that the raw data FSM (and any other FSM that isn't
+ * Do this now so that the raw data FSM (and any other FSM that isn't
* involved in space allocation for FSMs) will have no further activity.
*
* Note that while the raw data aggregator should not be restarted during
@@ -1371,23 +1372,23 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
/* b) Free the file space (if any) allocated to each free space manager.
*
- * Do this to facilitate reduction of the size of the file to the
- * extent possible. We will re-allocate space to free space managers
+ * Do this to facilitate reduction of the size of the file to the
+ * extent possible. We will re-allocate space to free space managers
* that have free space to save after this reduction.
*
* In the case of the raw data free space manager, and any other free
* space manager that does not allocate space for free space managers,
* allocations should be complete at this point, as all raw data should
* have space allocated and be flushed to file at this point. Thus we
- * can examine such free space managers and only re-allocate space for
+ * can examine such free space managers and only re-allocate space for
* them if they contain free space. Do this later in this function after
* the EOA has been reduced to the extent possible.
*
- * For free space managers that allocate file space for free space
- * managers (usually just a single metadata free space manager, but for
- * now at least, free space managers for different types of metadata
+ * For free space managers that allocate file space for free space
+ * managers (usually just a single metadata free space manager, but for
+ * now at least, free space managers for different types of metadata
* are possible), the matter is more ticklish due to the self-
- * referential nature of the problem. These FSMs are dealt with in
+ * referential nature of the problem. These FSMs are dealt with in
* H5MF_settle_meta_data_fsm().
*/
for(type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type)) {
@@ -1403,8 +1404,8 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
if(!fsm_visited[fsm_type]) {
fsm_visited[fsm_type] = TRUE;
- /* If there is no active FSM for this type, but such a FSM has
- * space allocated in file, open it so that we can free its file
+ /* If there is no active FSM for this type, but such a FSM has
+ * space allocated in file, open it so that we can free its file
* space.
*/
if(NULL == f->shared->fs_man[fsm_type]) {
@@ -1429,8 +1430,8 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
else
needed_ring = H5AC_RING_RDFSM;
if(needed_ring != curr_ring) {
- if(H5AC_set_ring(dxpl_id, needed_ring, &dxpl, &curr_ring)< 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value.")
+ if(H5AC_set_ring(dxpl_id, needed_ring, &dxpl, &curr_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
curr_ring = needed_ring;
} /* end if */
@@ -1456,11 +1457,11 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
} /* end if */
} /* end for */
- /* c) Delete the free space manager superblock extension message
+ /* c) Delete the free space manager superblock extension message
* if allocated.
*
* Must do this since the routine that writes / creates superblock
- * extension messages will choke if the target message is
+ * extension messages will choke if the target message is
* unexpectedly either absent or present.
*/
if(H5F_addr_defined(f->shared->sblock->ext_addr))
@@ -1471,39 +1472,39 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
if(H5MF__close_shrink_eoa(f, dxpl_id) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSHRINK, FAIL, "can't shrink eoa")
-
- /* 2) Ensure that space is allocated for the free space manager superblock
- * extension message. Must do this now, before reallocating file space
+
+ /* 2) Ensure that space is allocated for the free space manager superblock
+ * extension message. Must do this now, before reallocating file space
* for free space managers, as it is possible that this allocation may
- * grab the last section in a FSM -- making it unnecessary to
+ * grab the last section in a FSM -- making it unnecessary to
* re-allocate file space for it.
*
* Do this by writing a free space manager superblock extension message.
- *
- * Since no free space manager has file space allocated for it, this
+ *
+ * Since no free space manager has file space allocated for it, this
* message must be invalid since we can't save addresses of FSMs when
* those addresses are unknown. This is OK -- we will write the correct
* values to the message at free space manager shutdown.
*/
- for(type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type))
- fsinfo.fs_addr[type-1] = HADDR_UNDEF;
+ for(type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type))
+ fsinfo.fs_addr[type - 1] = HADDR_UNDEF;
fsinfo.strategy = f->shared->fs_strategy;
fsinfo.threshold = f->shared->fs_threshold;
if(H5F_super_ext_write_msg(f, dxpl_id, H5O_FSINFO_ID, &fsinfo, TRUE, H5O_MSG_NO_FLAGS_SET) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_WRITEERROR, FAIL, "error in writing message to superblock extension")
+ HGOTO_ERROR(H5E_RESOURCE, H5E_WRITEERROR, FAIL, "error in writing fsinfo message to superblock extension")
/* 3) Scan all free space managers not involved in allocating
* space for free space managers. For each such free space
- * manager, test to see if it contains free space. If
+ * manager, test to see if it contains free space. If
* it does, allocate file space for its header and section
- * data. If it contains no free space, leave it without
- * allocated file space as there is no need to save it to
+ * data. If it contains no free space, leave it without
+ * allocated file space as there is no need to save it to
* file.
*
* Note that all free space managers in this class should
- * see no further space allocations / deallocations as
- * at this point, all raw data allocations should be
+ * see no further space allocations / deallocations as
+ * at this point, all raw data allocations should be
* finalized, as should all metadata allocations not involving
* free space managers.
*
@@ -1529,11 +1530,11 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
if(needed_ring != curr_ring) {
if(H5AC_set_ring(dxpl_id, needed_ring, &dxpl, &curr_ring)< 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value.")
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
curr_ring = needed_ring;
} /* end if */
- /* Since there can be a many-to-one mapping from memory types
+ /* Since there can be a many-to-one mapping from memory types
* to free space managers, ensure that we don't visit any FSM
* more than once.
*/
@@ -1555,10 +1556,10 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
/* Query free space manager info for this type */
if(H5FS_stat_info(f, f->shared->fs_man[fsm_type], &fs_stat) < 0 )
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't get free-space info")
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't get free-space info")
- /* If the free space manager contains section info,
- * allocate space for the header and sinfo (note that
+ /* If the free space manager contains section info,
+ * allocate space for the header and sinfo (note that
* space must not be allocated at present -- verify
 * this with assertions).
*/
@@ -1600,7 +1601,7 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
/* Close any opened FSMs */
if(fsm_opened[fsm_type]) {
if(H5MF__alloc_close(f, dxpl_id, fsm_type) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't close file free space manager")
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't close file free space manager")
fsm_opened[fsm_type] = FALSE;
} /* end if */
} /* end if */
@@ -1788,7 +1789,7 @@ H5MF_settle_meta_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
} /* end if */
#endif /* NDEBUG */
- /* Free the space in the metadata aggregator. Do this via the
+ /* Free the space in the metadata aggregator. Do this via the
* H5MF_free_aggrs() call. Note that the raw data aggregator must
* have already been freed. Sanity checks for this?
*/
@@ -1802,18 +1803,18 @@ H5MF_settle_meta_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
/* ******************* PROBLEM: ********************
*
- * If the file has an alignement other than 1, and if
- * the EOA is not a multiple of this alignment, allocating sapce
- * for the section via the VFD info has the potential of generating
- * a fragment that will be added to the free space manager. This
+ * If the file has an alignment other than 1, and if
+ * the EOA is not a multiple of this alignment, allocating space
+ * for the section via the VFD info has the potential of generating
+ * a fragment that will be added to the free space manager. This
* of course undoes everything we have been doing here.
*
- * Need a way around this. Obvious solution is to force the EOA to
- * be a multiple of the alignment.
+ * Need a way around this. Obvious solution is to force the EOA to
+ * be a multiple of the alignment.
*
* Fortunately, alignment is typically 1, so this is a non-issue in
- * most cases. In cases where the alignment is not 1, for now we
- * have decided to drop the fragment on the floor.
+ * most cases. In cases where the alignment is not 1, for now we
+ * have decided to drop the fragment on the floor.
*
* Eventually, we should fix this by modifying the on disk representations
* of free space managers to allow for empty space, so as to bypass the
@@ -1823,30 +1824,30 @@ H5MF_settle_meta_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
/* HDassert(f->shared->alignment == 1); */
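    /* Worked example of the PROBLEM above, with illustrative numbers:
     * alignment = 4096 and EOA = 10000.  An aligned allocation must start
     * at the next multiple of the alignment, leaving a fragment that would
     * normally be handed back to a free space manager -- exactly what
     * settling is trying to avoid.
     */
    haddr_t eoa   = 10000;                              /* current EOA          */
    hsize_t align = 4096;                               /* f->shared->alignment */
    haddr_t start = ((eoa + align - 1) / align) * align;    /* 12288            */
    hsize_t frag  = start - eoa;                            /* 2288-byte fragment */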
- /* The free space manager(s) that handle space allocations for free
- * space managers should be settled now, albeit without file space
+ /* The free space manager(s) that handle space allocations for free
+ * space managers should be settled now, albeit without file space
* allocated to them. To avoid the possibility of changing the sizes
- * of their section info blocks, allocate space for them now at the
+ * of their section info blocks, allocate space for them now at the
* end of file via H5FD_alloc().
*
- * In the past, this issue of allocating space without touching the
- * free space managers has been deal with by calling
- * H5MF_aggr_vfd_alloc(), which in turn calls H5MF_aggr_alloc().
- * This is problematic since (if I read the code correctly) it will
- * re-constitute the metadata aggregator, which will add any left
- * over space to one of the free space managers when freed.
+ * In the past, this issue of allocating space without touching the
+ * free space managers has been dealt with by calling
+ * H5MF_aggr_vfd_alloc(), which in turn calls H5MF_aggr_alloc().
+ * This is problematic since (if I read the code correctly) it will
+ * re-constitute the metadata aggregator, which will add any leftover
+ * space to one of the free space managers when freed.
*
* This is a non-starter, since the entire objective is to settle the
* free space managers.
*
- * Hence the decision to call H5FD_alloc() directly.
- *
- * As discussed in PROBLEM above, if f->shared->alignment is not 1,
+ * Hence the decision to call H5FD_alloc() directly.
+ *
+ * As discussed in PROBLEM above, if f->shared->alignment is not 1,
* this has the possibility of generating a fragment of file space
* that would typically be inserted into one of the free space managers.
*
 * This isn't good, but due to schedule pressure, we will just drop
- * the fragement on the floor for now.
+ * the fragment on the floor for now.
*/
if(hdr_fspace)
if(H5FS_alloc_vfd_alloc_hdr_and_section_info(f, dxpl_id, hdr_fspace,
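    /* Sketch of the direct-VFD allocation the comment argues for
     * (assumptions: this H5FD_alloc() signature matches the 1.10-era
     * internal API, and hdr_size/fsm_type are placeholders supplied by
     * the surrounding routine).  The frag_* out-parameters report the
     * alignment fragment that, per the PROBLEM note, is dropped.
     */
    haddr_t frag_addr = HADDR_UNDEF;
    hsize_t frag_size = 0;
    haddr_t addr;

    if(HADDR_UNDEF == (addr = H5FD_alloc(f->shared->lf, dxpl_id, fsm_type, f,
            hdr_size, &frag_addr, &frag_size)))
        HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't allocate file space directly from VFD")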
diff --git a/src/H5MFprivate.h b/src/H5MFprivate.h
index 330fe80..e258677 100644
--- a/src/H5MFprivate.h
+++ b/src/H5MFprivate.h
@@ -51,15 +51,13 @@
/* File space manager routines */
H5_DLL herr_t H5MF_init_merge_flags(H5F_t *f);
-H5_DLL herr_t H5MF_get_freespace(H5F_t *f, hid_t dxpl_id, hsize_t *tot_space,
- hsize_t *meta_size);
+H5_DLL herr_t H5MF_get_freespace(H5F_t *f, hid_t dxpl_id, hsize_t *tot_space, hsize_t *meta_size);
H5_DLL herr_t H5MF_close(H5F_t *f, hid_t dxpl_id);
H5_DLL herr_t H5MF_try_close(H5F_t *f, hid_t dxpl_id);
/* File space allocation routines */
H5_DLL haddr_t H5MF_alloc(H5F_t *f, H5FD_mem_t type, hid_t dxpl_id, hsize_t size);
-H5_DLL haddr_t H5MF_aggr_vfd_alloc(H5F_t *f, H5FD_mem_t type, hid_t dxpl_id,
- hsize_t size);
+H5_DLL haddr_t H5MF_aggr_vfd_alloc(H5F_t *f, H5FD_mem_t type, hid_t dxpl_id, hsize_t size);
H5_DLL haddr_t H5MF_vfd_alloc(H5F_t *f, hid_t dxpl_id, H5FD_mem_t alloc_type,
hsize_t size, hbool_t keep_fragment);
H5_DLL herr_t H5MF_xfree(H5F_t *f, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr,
@@ -78,7 +76,7 @@ H5_DLL haddr_t H5MF_alloc_tmp(H5F_t *f, hsize_t size);
H5_DLL herr_t H5MF_free_aggrs(H5F_t *f, hid_t dxpl_id);
H5_DLL htri_t H5MF_aggrs_try_shrink_eoa(H5F_t *f, hid_t dxpl_id);
-/* Settling routines */
+/* Free space manager settling routines */
H5_DLL herr_t H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled);
H5_DLL herr_t H5MF_settle_meta_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled);
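/* Illustrative caller-side usage of the two settling routines declared
 * above (a hypothetical driver, not code from this patch):
 */
hbool_t fsm_settled = FALSE;

if(H5MF_settle_raw_data_fsm(f, dxpl_id, &fsm_settled) < 0)
    HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFLUSH, FAIL, "can't settle raw data FSMs")
if(H5MF_settle_meta_data_fsm(f, dxpl_id, &fsm_settled) < 0)
    HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFLUSH, FAIL, "can't settle metadata FSMs")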