path: root/src/H5C.c
Diffstat (limited to 'src/H5C.c')
-rw-r--r--  src/H5C.c | 697
1 file changed, 558 insertions(+), 139 deletions(-)
diff --git a/src/H5C.c b/src/H5C.c
index ba23b50..805b4f5 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -159,6 +159,10 @@ static herr_t H5C__mark_flush_dep_dirty(H5C_cache_entry_t * entry);
static herr_t H5C__mark_flush_dep_clean(H5C_cache_entry_t * entry);
+static herr_t H5C__serialize_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring);
+static herr_t H5C__serialize_single_entry(H5F_t *f, hid_t dxpl_id,
+ H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr);
+
static herr_t H5C__verify_len_eoa(H5F_t *f, const H5C_class_t * type,
haddr_t addr, size_t *len, hbool_t actual);
@@ -750,20 +754,10 @@ H5C_prep_for_file_close(H5F_t *f, hid_t dxpl_id)
/* Make certain there aren't any protected entries */
HDassert(cache_ptr->pl_len == 0);
- /* If the file is opened and closed without any access to
- * any group or dataset, it is possible that the cache image (if
- * it exists) has not been read yet. Do this now if required.
- */
- if(cache_ptr->load_image) {
- cache_ptr->load_image = FALSE;
- if(H5C__load_cache_image(f, dxpl_id) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "Can't load cache image")
- } /* end if */
+ /* Prepare cache image */
+ if(H5C__prep_image_for_file_close(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create cache image")
- /* Generate the cache image, if requested */
- if(cache_ptr->image_ctl.generate_image)
- if(H5C__prep_image_for_file_close(f, dxpl_id) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "Can't create cache image")
done:
FUNC_LEAVE_NOAPI(ret_value)
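
For context: the two steps removed above (the deferred read of the cache image, and the generate_image gate) are folded into the single H5C__prep_image_for_file_close() call. A minimal sketch of the consolidated callee, assuming it simply absorbs both checks (illustrative only, not the actual HDF5 implementation):

    static herr_t
    prep_image_for_file_close_sketch(H5F_t *f, hid_t dxpl_id)
    {
        H5C_t *cache_ptr = f->shared->cache;
        herr_t ret_value = SUCCEED;

        FUNC_ENTER_STATIC

        /* If the file was opened and closed without any access to a group
         * or dataset, the cache image (if one exists) may not have been
         * read yet -- do it now, as the caller used to.
         */
        if(cache_ptr->load_image) {
            cache_ptr->load_image = FALSE;
            if(H5C__load_cache_image(f, dxpl_id) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "can't load cache image")
        } /* end if */

        /* Generate the cache image only if one was requested */
        if(cache_ptr->image_ctl.generate_image) {
            /* ... serialize the cache and construct the image ... */
        } /* end if */

    done:
        FUNC_LEAVE_NOAPI(ret_value)
    } /* prep_image_for_file_close_sketch() */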
@@ -1086,9 +1080,9 @@ H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
*/
ring = H5C_RING_USER;
while(ring < H5C_RING_NTYPES) {
- /* only call the free space manager settle routines when close
- * warning has been received, and then only when the index is
- * non-empty for that ring.
+
+ /* Only call the free space manager settle routines when close
+ * warning has been received.
*/
if(cache_ptr->close_warning_received) {
switch(ring) {
@@ -1096,36 +1090,20 @@ H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
break;
case H5C_RING_RDFSM:
- if(!cache_ptr->rdfsm_settled) {
- hbool_t fsm_settled = FALSE; /* Whether the FSM was actually settled */
-
- /* Settle raw data FSM */
- if(H5MF_settle_raw_data_fsm(f, dxpl_id, &fsm_settled) < 0)
+ /* Settle raw data FSM */
+ if(!cache_ptr->rdfsm_settled)
+ if(H5MF_settle_raw_data_fsm(f, dxpl_id, &cache_ptr->rdfsm_settled) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed")
-
- /* Only set the flag if the FSM was actually settled */
- if(fsm_settled)
- cache_ptr->rdfsm_settled = TRUE;
- } /* end if */
break;
case H5C_RING_MDFSM:
- if(!cache_ptr->mdfsm_settled) {
- hbool_t fsm_settled = FALSE; /* Whether the FSM was actually settled */
-
- /* Settle metadata FSM */
- if(H5MF_settle_meta_data_fsm(f, dxpl_id, &fsm_settled) < 0)
+ /* Settle metadata FSM */
+ if(!cache_ptr->mdfsm_settled)
+ if(H5MF_settle_meta_data_fsm(f, dxpl_id, &cache_ptr->mdfsm_settled) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed")
-
- /* Only set the flag if the FSM was actually settled */
- if(fsm_settled)
- cache_ptr->mdfsm_settled = TRUE;
- } /* end if */
break;
case H5C_RING_SBE:
- break;
-
case H5C_RING_SB:
break;
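
The simplification above depends on a contract of the settle routines: they write TRUE through their hbool_t out-parameter only when the free space manager is actually settled, and never clobber a TRUE flag with FALSE. That makes passing &cache_ptr->rdfsm_settled / &cache_ptr->mdfsm_settled directly equivalent to the old local fsm_settled temporary. A standalone toy illustrating the pattern (names are illustrative, not HDF5 APIs):

    #include <assert.h>
    #include <stdbool.h>

    /* Toy stand-in for H5MF_settle_raw_data_fsm(): sets *fsm_settled
     * only when settling actually occurs; never writes false over true.
     */
    static int
    settle_fsm(bool *fsm_settled)
    {
        if(!*fsm_settled) {
            /* ... settling work would go here ... */
            *fsm_settled = true;
        }
        return 0;   /* 0 == success */
    }

    int main(void)
    {
        bool rdfsm_settled = false;     /* persistent flag, as in H5C_t */

        /* Passing the persistent flag's address directly replaces the
         * old local-temporary-plus-copy-on-success pattern.
         */
        assert(settle_fsm(&rdfsm_settled) == 0);
        assert(rdfsm_settled);
        return 0;
    }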
@@ -1470,6 +1448,11 @@ H5C_insert_entry(H5F_t * f,
entry_ptr->aux_next = NULL;
entry_ptr->aux_prev = NULL;
+#ifdef H5_HAVE_PARALLEL
+ entry_ptr->coll_next = NULL;
+ entry_ptr->coll_prev = NULL;
+#endif /* H5_HAVE_PARALLEL */
+
/* initialize cache image related fields */
entry_ptr->include_in_image = FALSE;
entry_ptr->lru_rank = 0;
@@ -1486,11 +1469,6 @@ H5C_insert_entry(H5F_t * f,
entry_ptr->serialization_count = 0;
#endif /* NDEBUG */
-#ifdef H5_HAVE_PARALLEL
- entry_ptr->coll_next = NULL;
- entry_ptr->coll_prev = NULL;
-#endif /* H5_HAVE_PARALLEL */
-
/* Apply tag to newly inserted entry */
if(H5C__tag_entry(cache_ptr, entry_ptr, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry")
@@ -1556,7 +1534,7 @@ H5C_insert_entry(H5F_t * f,
if(H5C__make_space_in_cache(f, dxpl_id, space_needed, write_permitted) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__make_space_in_cache failed")
- }
+ } /* end if */
H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
@@ -2542,6 +2520,8 @@ H5C_protect(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't load entry")
entry_ptr = (H5C_cache_entry_t *)thing;
+ cache_ptr->entries_loaded_counter++;
+
entry_ptr->ring = ring;
#ifdef H5_HAVE_PARALLEL
if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI) && entry_ptr->coll_access)
@@ -2632,7 +2612,7 @@ H5C_protect(H5F_t * f,
if(H5C__make_space_in_cache(f, dxpl_id, space_needed, write_permitted) < 0 )
HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed")
- }
+ } /* end if */
/* Insert the entry in the hash table. It can't be dirty yet, so
* we don't even check to see if it should go in the skip list.
@@ -2667,13 +2647,10 @@ H5C_protect(H5F_t * f,
*/
H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, NULL)
- /* Update entries loaded in cache counter */
- cache_ptr->entries_loaded_counter++;
-
/* Record that the entry was loaded, to trigger a notify callback later */
/* (After the entry is fully added to the cache) */
was_loaded = TRUE;
- }
+ } /* end else */
HDassert(entry_ptr->addr == addr);
HDassert(entry_ptr->type == type);
@@ -3490,23 +3467,20 @@ done:
*
* Function: H5C_unsettle_entry_ring
*
- * Purpose: Advise the metadata cache that the specified entry's metadata
- * cache manager ring is no longer settled (if it was on entry).
+ * Purpose: Advise the metadata cache that the specified entry's free space
+ * manager ring is no longer settled (if it was on entry).
*
- * If the target metadata cache manager ring is already
+ * If the target free space manager ring is already
* unsettled, do nothing, and return SUCCEED.
*
- * If the target metadata cache manager ring is settled, and
+ * If the target free space manager ring is settled, and
* we are not in the process of a file shutdown, mark
* the ring as unsettled, and return SUCCEED.
*
- * If the target metadata cache manager is settled, and we
+ * If the target free space manager is settled, and we
* are in the process of a file shutdown, post an error
* message, and return FAIL.
*
- * Note that this function simply passes the call on to
- * the metadata cache proper, and returns the result.
- *
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
@@ -4565,19 +4539,6 @@ done:
*
* Programmer: John Mainzer, 11/22/04
*
- * Changes: Modified function to detect deletions of entries
- * during a scan of the LRU, and where appropriate,
- * restart the scan to avoid proceeding with a next
- * entry that is no longer in the cache.
- *
- * Note the absence of checks after flushes of clean
- * entries. As a second entry can only be removed by
- * by a call to the pre_serialize or serialize callback
- * of the first, and as these callbacks will not be called
- * on clean entries, no checks are needed.
- *
- * JRM -- 4/6/15
- *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -4669,39 +4630,26 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
}
- if ( prev_ptr != NULL ) {
-
+ if(prev_ptr != NULL) {
if(corked) /* dirty corked entry is skipped */
entry_ptr = prev_ptr;
-
- else if ( ( restart_scan )
- ||
- ( prev_ptr->is_dirty != prev_is_dirty )
- ||
- ( prev_ptr->next != next_ptr )
- ||
- ( prev_ptr->is_protected )
- ||
- ( prev_ptr->is_pinned ) ) {
-
- /* something has happened to the LRU -- start over
+ else if(restart_scan || (prev_ptr->is_dirty != prev_is_dirty)
+ || (prev_ptr->next != next_ptr)
+ || (prev_ptr->is_protected)
+ || (prev_ptr->is_pinned)) {
+ /* Something has happened to the LRU -- start over
* from the tail.
*/
restart_scan = FALSE;
entry_ptr = cache_ptr->LRU_tail_ptr;
H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
-
- } else {
-
+ } /* end else-if */
+ else
entry_ptr = prev_ptr;
-
- }
- } else {
-
+ } /* end if */
+ else
entry_ptr = NULL;
-
- }
} /* end while */
/* for now at least, don't bother to maintain the minimum clean size,
@@ -5340,7 +5288,7 @@ H5C_flush_invalidate_ring(H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
H5C_cache_entry_t *entry_ptr = NULL;
H5C_cache_entry_t *next_entry_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
- int64_t initial_slist_len = 0;
+ uint32_t initial_slist_len = 0;
size_t initial_slist_size = 0;
#endif /* H5C_DO_SANITY_CHECKS */
herr_t ret_value = SUCCEED;
@@ -5574,8 +5522,8 @@ H5C_flush_invalidate_ring(H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
*/
if(node_ptr == NULL) {
- HDassert(cache_ptr->slist_len == (initial_slist_len + cache_ptr->slist_len_increase));
- HDassert((int64_t)cache_ptr->slist_size == ((int64_t)initial_slist_size + cache_ptr->slist_size_increase));
+ HDassert(cache_ptr->slist_len == (uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase));
+ HDassert(cache_ptr->slist_size == (size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase));
} /* end if */
#endif /* H5C_DO_SANITY_CHECKS */
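
The assertions above mix an unsigned snapshot (initial_slist_len, now uint32_t) with a signed net change (slist_len_increase can go negative as entries leave the skip list during the flush), so the sum is formed in signed arithmetic and cast back to the counter's unsigned type. A small standalone illustration of the pattern:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t initial_slist_len = 10;    /* snapshot before the flush */
        int32_t  slist_len_increase = -3;   /* net change; may be negative */

        /* Add in signed arithmetic, then cast back to the unsigned type
         * of the live counter -- the shape of the assertions above.
         */
        uint32_t expected = (uint32_t)((int32_t)initial_slist_len + slist_len_increase);

        assert(expected == 7);
        return 0;
    }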
@@ -5777,7 +5725,7 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
H5C_cache_entry_t * entry_ptr = NULL;
H5C_cache_entry_t * next_entry_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
- int64_t initial_slist_len = 0;
+ uint32_t initial_slist_len = 0;
size_t initial_slist_size = 0;
#endif /* H5C_DO_SANITY_CHECKS */
int i;
@@ -5982,8 +5930,8 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
#if H5C_DO_SANITY_CHECKS
/* Verify that the slist size and length are as expected. */
- HDassert((initial_slist_len + cache_ptr->slist_len_increase) == cache_ptr->slist_len);
- HDassert((size_t)((int64_t)initial_slist_size + cache_ptr->slist_size_increase) == cache_ptr->slist_size);
+ HDassert((uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase) == cache_ptr->slist_len);
+ HDassert((size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase) == cache_ptr->slist_size);
#endif /* H5C_DO_SANITY_CHECKS */
} /* while */
@@ -6929,38 +6877,6 @@ done:
*
* Programmer: John Mainzer, 5/14/04
*
- * Changes: Modified function to skip over entries with the
- * flush_in_progress flag set. If this is not done,
- * an infinite recursion is possible if the cache is
- * full, and the pre-serialize or serialize routine
- * attempts to load another entry.
- *
- * This error was exposed by a re-factor of the
- * H5C__flush_single_entry() routine. However, it was
- * a potential bug from the moment that entries were
- * allowed to load other entries on flush.
- *
- * In passing, note that the primary and secondary dxpls
- * mentioned in the comment above have been replaced by
- * a single dxpl at some point, and thus the discussion
- * above is somewhat obsolete. Date of this change is
- * unkown.
- *
- * JRM -- 12/26/14
- *
- * Modified function to detect deletions of entries
- * during a scan of the LRU, and where appropriate,
- * restart the scan to avoid proceeding with a next
- * entry that is no longer in the cache.
- *
- * Note the absence of checks after flushes of clean
- * entries. As a second entry can only be removed by
- * by a call to the pre_serialize or serialize callback
- * of the first, and as these callbacks will not be called
- * on clean entries, no checks are needed.
- *
- * JRM -- 4/6/15
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -6993,21 +6909,15 @@ H5C__make_space_in_cache(H5F_t *f, hid_t dxpl_id, size_t space_needed,
HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size));
if ( write_permitted ) {
-
restart_scan = FALSE;
initial_list_len = cache_ptr->LRU_list_len;
entry_ptr = cache_ptr->LRU_tail_ptr;
- if ( cache_ptr->index_size >= cache_ptr->max_cache_size ) {
-
+ if(cache_ptr->index_size >= cache_ptr->max_cache_size)
empty_space = 0;
-
- } else {
-
+ else
empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
- }
-
while ( ( ( (cache_ptr->index_size + space_needed)
>
cache_ptr->max_cache_size
@@ -8064,6 +7974,515 @@ H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t * entry,
/*-------------------------------------------------------------------------
+ * Function: H5C__serialize_cache
+ *
+ * Purpose: Serialize (i.e. construct an on-disk image for) all entries
+ * in the metadata cache, including clean entries.
+ *
+ * Note that flush dependencies and "flush me last" flags
+ * must be observed in the serialization process.
+ *
+ * Note also that entries may be loaded, flushed, evicted,
+ * expunged, relocated, resized, or removed from the cache
+ * during this process, just as these actions may occur during
+ * a regular flush.
+ *
+ * However, we are given that the cache will contain no protected
+ * entries on entry to this routine (although entries may be
+ * briefly protected and then unprotected during the serialize
+ * process).
+ *
+ * The objective of this routine is to serialize all entries and
+ * force them into their actual locations on disk.
+ *
+ * The initial need for this routine is to settle all entries
+ * in the cache prior to construction of the metadata cache
+ * image so that the size of the cache image can be calculated.
+ * However, I gather that other uses for the routine are
+ * under consideration.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 7/22/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__serialize_cache(H5F_t *f, hid_t dxpl_id)
+{
+#if H5C_DO_SANITY_CHECKS
+ int i;
+ uint32_t index_len = 0;
+ size_t index_size = (size_t)0;
+ size_t clean_index_size = (size_t)0;
+ size_t dirty_index_size = (size_t)0;
+ size_t slist_size = (size_t)0;
+ uint32_t slist_len = 0;
+#endif /* H5C_DO_SANITY_CHECKS */
+ H5C_ring_t ring;
+ H5C_t * cache_ptr;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
+
+#if H5C_DO_SANITY_CHECKS
+ HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+
+ for(i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
+ index_len += cache_ptr->index_ring_len[i];
+ index_size += cache_ptr->index_ring_size[i];
+ clean_index_size += cache_ptr->clean_index_ring_size[i];
+ dirty_index_size += cache_ptr->dirty_index_ring_size[i];
+
+ slist_len += cache_ptr->slist_ring_len[i];
+ slist_size += cache_ptr->slist_ring_size[i];
+ } /* end for */
+
+ HDassert(cache_ptr->index_len == index_len);
+ HDassert(cache_ptr->index_size == index_size);
+ HDassert(cache_ptr->clean_index_size == clean_index_size);
+ HDassert(cache_ptr->dirty_index_size == dirty_index_size);
+ HDassert(cache_ptr->slist_len == slist_len);
+ HDassert(cache_ptr->slist_size == slist_size);
+#endif /* H5C_DO_SANITY_CHECKS */
+
+#if H5C_DO_EXTREME_SANITY_CHECKS
+ if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_lru_list(cache_ptr) < 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+#ifndef NDEBUG
+ /* If this is a debug build, set the serialization_count field of
+ * each entry in the cache to zero before we start the serialization.
+ * This allows us to detect the case in which any entry is serialized
+ * more than once (a performance issue), and more importantly, the
+ * case in which any flush dependency parent is serialized more than
+ * once (a correctness issue).
+ */
+ {
+ H5C_cache_entry_t * scan_ptr = NULL;
+
+ scan_ptr = cache_ptr->il_head;
+ while(scan_ptr != NULL) {
+ HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ scan_ptr->serialization_count = 0;
+ scan_ptr = scan_ptr->il_next;
+ } /* end while */
+ } /* end block */
+#endif /* NDEBUG */
+
+ /* Set cache_ptr->serialization_in_progress to TRUE, and set it back
+ * to FALSE at the end of the function. This flag must be maintained
+ * to support H5C_get_serialization_in_progress(), which is in turn
+ * required to support sanity checking in some cache clients.
+ */
+ HDassert(!cache_ptr->serialization_in_progress);
+ cache_ptr->serialization_in_progress = TRUE;
+
+ /* Serialize each ring, starting from the outermost ring and
+ * working inward.
+ */
+ ring = H5C_RING_USER;
+ while(ring < H5C_RING_NTYPES) {
+ HDassert(cache_ptr->close_warning_received);
+ switch(ring) {
+ case H5C_RING_USER:
+ break;
+
+ case H5C_RING_RDFSM:
+ /* Settle raw data FSM */
+ if(!cache_ptr->rdfsm_settled)
+ if(H5MF_settle_raw_data_fsm(f, dxpl_id, &cache_ptr->rdfsm_settled) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed")
+ break;
+
+ case H5C_RING_MDFSM:
+ /* Settle metadata FSM */
+ if(!cache_ptr->mdfsm_settled)
+ if(H5MF_settle_meta_data_fsm(f, dxpl_id, &cache_ptr->mdfsm_settled) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed")
+ break;
+
+ case H5C_RING_SBE:
+ case H5C_RING_SB:
+ break;
+
+ default:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown ring?!?!")
+ break;
+ } /* end switch */
+
+ if(H5C__serialize_ring(f, dxpl_id, ring) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "serialize ring failed")
+
+ ring++;
+ } /* end while */
+
+#ifndef NDEBUG
+ /* Verify that no entry has been serialized more than once.
+ * FD parents with multiple serializations should have been caught
+ * elsewhere, so no specific check for them here.
+ */
+ {
+ H5C_cache_entry_t * scan_ptr = NULL;
+
+ scan_ptr = cache_ptr->il_head;
+ while(scan_ptr != NULL) {
+ HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(scan_ptr->serialization_count <= 1);
+
+ scan_ptr = scan_ptr->il_next;
+ } /* end while */
+ } /* end block */
+#endif /* NDEBUG */
+
+done:
+ cache_ptr->serialization_in_progress = FALSE;
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__serialize_cache() */
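
Per the header comment, the motivating client of H5C__serialize_cache() is cache image construction: once every entry is serialized and settled on disk, the image size can be computed. A hypothetical call site (illustrative, not the actual HDF5 code path):

    static herr_t
    prep_image_sketch(H5F_t *f, hid_t dxpl_id)
    {
        herr_t ret_value = SUCCEED;

        FUNC_ENTER_STATIC

        /* Settle all entries so that sizes and on-disk locations are
         * final before the cache image size is calculated.
         */
        if(H5C__serialize_cache(f, dxpl_id) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "serialization of the cache failed")

        /* ... compute the image size from the now-stable entry images ... */

    done:
        FUNC_LEAVE_NOAPI(ret_value)
    } /* prep_image_sketch() */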
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__serialize_ring
+ *
+ * Purpose: Serialize the entries contained in the specified cache and
+ * ring. All entries in rings outside the specified ring
+ * must have been serialized on entry.
+ *
+ * If the cache contains protected entries in the specified
+ * ring, the function will fail, as protected entries cannot
+ * be serialized. However, all unprotected entries in the
+ * target ring should be serialized before the function
+ * returns failure.
+ *
+ * If flush dependencies appear in the target ring, the
+ * function makes repeated passes through the index list
+ * serializing entries in flush dependency order.
+ *
+ * All entries outside the H5C_RING_SBE are marked for
+ * inclusion in the cache image. Entries in H5C_RING_SBE
+ * and below are marked for exclusion from the image.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 9/11/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__serialize_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring)
+{
+ hbool_t done = FALSE;
+ H5C_t * cache_ptr;
+ H5C_cache_entry_t * entry_ptr;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(ring > H5C_RING_UNDEFINED);
+ HDassert(ring < H5C_RING_NTYPES);
+
+ HDassert(cache_ptr->serialization_in_progress);
+
+ /* The objective here is to serialize all entries in the cache ring
+ * in flush dependency order.
+ *
+ * The basic algorithm is to scan the cache index list looking for
+ * unserialized entries that are either not in a flush dependency
+ * relationship, or which have no unserialized children. Any such
+ * entry is serialized and its flush dependency parents (if any) are
+ * informed -- allowing them to decrement their unserialized child counts.
+ *
+ * However, this algorithm is complicated by the ability
+ * of client serialization callbacks to perform operations
+ * on the cache which can result in the insertion, deletion,
+ * relocation, resize, dirty, flush, eviction, or removal (via the
+ * take ownership flag) of entries. Changes in the flush dependency
+ * structure are also possible.
+ *
+ * On the other hand, the algorithm is simplified by the fact that
+ * we are serializing, not flushing. Thus, as long as all entries
+ * are serialized correctly, it doesn't matter if we have to go back
+ * and serialize an entry a second time.
+ *
+ * These possible actions result in the following modifications to
+ * the basic algorithm:
+ *
+ * 1) In the event of an entry expunge, eviction or removal, we must
+ * restart the scan as it is possible that the next entry in our
+ * scan is no longer in the cache. Were we to examine this entry,
+ * we would be accessing deallocated memory.
+ *
+ * 2) A resize, dirty, or insertion of an entry may result in the
+ * increment of a flush dependency parent's dirty and/or
+ * unserialized child count. In the context of serializing
+ * the cache, this is a non-issue, as even if we have already
+ * serialized the parent, it will be marked dirty and its image
+ * marked out of date if appropriate when the child is serialized.
+ *
+ * However, this is a major issue for a flush, where it would
+ * violate the invariant that the flush dependency
+ * feature is intended to enforce. As the metadata cache has no
+ * control over the behavior of cache clients, it has no way of
+ * preventing this behavior. However, it should detect it if at all
+ * possible.
+ *
+ * Do this by maintaining a count of the number of times each entry is
+ * serialized during a cache serialization. If any flush dependency
+ * parent is serialized more than once, throw an assertion failure.
+ *
+ * 3) An entry relocation will typically change the location of the
+ * entry in the index list. This shouldn't cause problems as we
+ * will scan the index list until we make a complete pass without
+ * finding anything to serialize -- making relocations of either
+ * the current or next entries irrelevant.
+ *
+ * Note that since a relocation may result in our skipping part of
+ * the index list, we must always do at least one more pass through
+ * the index list after an entry relocation.
+ *
+ * 4) Changes in the flush dependency structure are possible on
+ * entry insertion, load, expunge, evict, or remove. Destruction
+ * of a flush dependency has no effect, as it can only relax the
+ * flush dependencies. Creation of a flush dependency can create
+ * an unserialized child of a flush dependency parent where all
+ * flush dependency children were previously serialized. Should
+ * this child dirty the flush dependency parent when it is serialized,
+ * the parent will be re-serialized.
+ *
+ * Per the discussion of 2) above, this is a non-issue for cache
+ * serialization, and a major problem for cache flush. Using the
+ * same detection mechanism, throw an assertion failure if this
+ * condition appears.
+ *
+ * Observe that either eviction or removal of entries as a result of
+ * a serialization is not a problem as long as the flush dependency
+ * tree does not change beyond the removal of a leaf.
+ */
+ while(!done) {
+ /* Reset the counters so that we can detect insertions, loads,
+ * moves, and flush dependency height changes caused by the pre_serialize
+ * and serialize callbacks.
+ */
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+
+ done = TRUE; /* set to FALSE if any activity in inner loop */
+ entry_ptr = cache_ptr->il_head;
+ while(entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+
+ /* Verify that either the entry is already serialized, or
+ * that it is assigned to either the target or an inner
+ * ring.
+ */
+ HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
+
+ /* Skip flush me last entries or inner ring entries */
+ if(!entry_ptr->flush_me_last && entry_ptr->ring == ring) {
+
+ /* If we encounter an unserialized entry in the current
+ * ring that is not marked flush me last, we are not done.
+ */
+ if(!entry_ptr->image_up_to_date)
+ done = FALSE;
+
+ /* Serialize the entry if its image is not up to date
+ * and it has no unserialized flush dependency children.
+ */
+ if(!entry_ptr->image_up_to_date && entry_ptr->flush_dep_nunser_children == 0) {
+ HDassert(entry_ptr->serialization_count == 0);
+
+ /* Serialize the entry */
+ if(H5C__serialize_single_entry(f, dxpl_id, cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
+
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ HDassert(entry_ptr->serialization_count == 0);
+
+#ifndef NDEBUG
+ /* Increment serialization counter (to detect multiple serializations) */
+ entry_ptr->serialization_count++;
+#endif /* NDEBUG */
+ } /* end if */
+ } /* end if */
+
+ /* Check for the cache being perturbed during the entry serialize */
+ if((cache_ptr->entries_loaded_counter > 0) ||
+ (cache_ptr->entries_inserted_counter > 0) ||
+ (cache_ptr->entries_relocated_counter > 0)) {
+
+#if H5C_COLLECT_CACHE_STATS
+ H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr);
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ /* Reset the counters */
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+
+ /* Restart scan */
+ entry_ptr = cache_ptr->il_head;
+ } /* end if */
+ else
+ /* Advance to next entry */
+ entry_ptr = entry_ptr->il_next;
+ } /* while ( entry_ptr != NULL ) */
+ } /* while ( ! done ) */
+
+
+ /* Reset the counters so that we can detect insertions, loads,
+ * moves, and flush dependency height changes caused by the pre_serialize
+ * and serialize callbacks.
+ */
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+
+ /* At this point, all entries not marked "flush me last" and in
+ * the current ring or outside it should be serialized and have up
+ * to date images. Scan the index list again to serialize the
+ * "flush me last" entries (if they are in the current ring) and to
+ * verify that all other entries have up to date images.
+ */
+ entry_ptr = cache_ptr->il_head;
+ while(entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring > H5C_RING_UNDEFINED);
+ HDassert(entry_ptr->ring < H5C_RING_NTYPES);
+ HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
+
+ if(entry_ptr->ring == ring) {
+ if(entry_ptr->flush_me_last) {
+ if(!entry_ptr->image_up_to_date) {
+ HDassert(entry_ptr->serialization_count == 0);
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+
+ /* Serialize the entry */
+ if(H5C__serialize_single_entry(f, dxpl_id, cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
+
+ /* Check for the cache changing */
+ if((cache_ptr->entries_loaded_counter > 0) ||
+ (cache_ptr->entries_inserted_counter > 0) ||
+ (cache_ptr->entries_relocated_counter > 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flush_me_last entry serialization triggered restart")
+
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ HDassert(entry_ptr->serialization_count == 0);
+#ifndef NDEBUG
+ /* Increment serialization counter (to detect multiple serializations) */
+ entry_ptr->serialization_count++;
+#endif /* NDEBUG */
+ } /* end if */
+ } /* end if */
+ else {
+ HDassert(entry_ptr->image_up_to_date);
+ HDassert(entry_ptr->serialization_count <= 1);
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ } /* end else */
+ } /* if ( entry_ptr->ring == ring ) */
+
+ entry_ptr = entry_ptr->il_next;
+ } /* while ( entry_ptr != NULL ) */
+
+done:
+ HDassert(cache_ptr->serialization_in_progress);
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__serialize_ring() */
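
Reduced to its skeleton, the control flow above is a scan-with-restart over the index list: keep making passes until one completes with nothing left to serialize, and jump back to the head whenever the load/insert/relocate counters show the list was perturbed mid-scan. A standalone toy of that idiom (types and names are illustrative):

    #include <stdbool.h>
    #include <stddef.h>

    struct node { struct node *next; bool image_up_to_date; };

    static int entries_changed;     /* stands in for the three counters */

    static void serialize_node(struct node *n) { n->image_up_to_date = true; }

    /* Keep passing over the list until a full pass does no work;
     * restart from the head whenever the change counter trips, since
     * the saved 'next' pointer may no longer be valid.
     */
    static void
    serialize_all(struct node *head)
    {
        bool done = false;

        while(!done) {
            struct node *cur = head;

            done = true;
            entries_changed = 0;
            while(cur != NULL) {
                if(!cur->image_up_to_date) {
                    done = false;
                    serialize_node(cur);    /* may bump entries_changed */
                }
                if(entries_changed > 0) {   /* list perturbed: restart */
                    entries_changed = 0;
                    cur = head;
                }
                else
                    cur = cur->next;
            }
        }
    }

    int main(void)
    {
        struct node b = { NULL, false }, a = { &b, false };
        serialize_all(&a);
        return (a.image_up_to_date && b.image_up_to_date) ? 0 : 1;
    }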
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__serialize_single_entry
+ *
+ * Purpose: Serialize the cache entry pointed to by the entry_ptr
+ * parameter.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer, 7/24/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__serialize_single_entry(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr,
+ H5C_cache_entry_t *entry_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(!entry_ptr->prefetched);
+ HDassert(!entry_ptr->image_up_to_date);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(!entry_ptr->is_protected);
+ HDassert(!entry_ptr->flush_in_progress);
+ HDassert(entry_ptr->type);
+
+ /* Set entry_ptr->flush_in_progress to TRUE so that the target entry
+ * will not be evicted out from under us. Must set it back to FALSE
+ * when we are done.
+ */
+ entry_ptr->flush_in_progress = TRUE;
+
+ /* Allocate buffer for the entry image if required. */
+ if(NULL == entry_ptr->image_ptr) {
+ HDassert(entry_ptr->size > 0);
+ if(NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)) )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
+#if H5C_DO_MEMORY_SANITY_CHECKS
+ HDmemcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+ } /* end if */
+
+ /* Generate image for entry */
+ if(H5C__generate_image(f, cache_ptr, entry_ptr, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "Can't generate image for cache entry")
+
+ /* Reset the flush_in_progress flag */
+ entry_ptr->flush_in_progress = FALSE;
+
+done:
+ HDassert((ret_value != SUCCEED) || (!entry_ptr->flush_in_progress));
+ HDassert((ret_value != SUCCEED) || (entry_ptr->image_up_to_date));
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__serialize_single_entry() */
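
When H5C_DO_MEMORY_SANITY_CHECKS is enabled, the allocation above reserves H5C_IMAGE_EXTRA_SPACE guard bytes past the entry's size and seeds them with H5C_IMAGE_SANITY_VALUE, so a later check can catch a serialize callback overrunning the image buffer. A standalone toy version of the guard-byte idea (constants here are illustrative, not the HDF5 macros):

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    #define EXTRA_SPACE 8
    static const unsigned char guard[EXTRA_SPACE] =
        { 0xDE, 0xAD, 0xBE, 0xEF, 0xDE, 0xAD, 0xBE, 0xEF };

    int main(void)
    {
        size_t size = 64;   /* analogue of entry_ptr->size */
        unsigned char *image = malloc(size + EXTRA_SPACE);

        assert(image != NULL);

        /* Seed the guard region just past the usable image buffer */
        memcpy(image + size, guard, EXTRA_SPACE);

        /* ... serialize into image[0 .. size-1] ... */

        /* If a callback wrote past 'size', the guard bytes changed */
        assert(memcmp(image + size, guard, EXTRA_SPACE) == 0);

        free(image);
        return 0;
    }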
+
+
+/*-------------------------------------------------------------------------
* Function: H5C__generate_image
*
* Purpose: Serialize an entry and generate its image.