-rw-r--r--  src/H5C.c  569
-rw-r--r--  src/H5Cdbg.c  14
-rw-r--r--  src/H5Cimage.c  891
-rw-r--r--  src/H5Cmpio.c  15
-rw-r--r--  src/H5Cpkg.h  33
-rw-r--r--  src/H5Cprivate.h  5
-rw-r--r--  src/H5Fsuper.c  17
-rw-r--r--  src/H5HFcache.c  4
-rw-r--r--  test/cache.c  12
-rw-r--r--  test/cache_common.c  4
-rw-r--r--  test/cache_image.c  2
-rw-r--r--  test/dsets.c  10
-rw-r--r--  test/fheap.c  104
-rw-r--r--  test/fillval.c  2
-rw-r--r--  test/freespace.c  10
-rw-r--r--  test/links.c  2
-rw-r--r--  test/objcopy.c  2
-rw-r--r--  test/testflushrefresh.sh.in  4
-rw-r--r--  testpar/t_cache.c  10
-rw-r--r--  tools/test/h5format_convert/h5fc_gentest.c  3
20 files changed, 786 insertions, 927 deletions
diff --git a/src/H5C.c b/src/H5C.c
index fe97f05..805b4f5 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -159,6 +159,10 @@ static herr_t H5C__mark_flush_dep_dirty(H5C_cache_entry_t * entry);
static herr_t H5C__mark_flush_dep_clean(H5C_cache_entry_t * entry);
+static herr_t H5C__serialize_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring);
+static herr_t H5C__serialize_single_entry(H5F_t *f, hid_t dxpl_id,
+ H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr);
+
static herr_t H5C__verify_len_eoa(H5F_t *f, const H5C_class_t * type,
haddr_t addr, size_t *len, hbool_t actual);
@@ -750,20 +754,10 @@ H5C_prep_for_file_close(H5F_t *f, hid_t dxpl_id)
/* Make certain there aren't any protected entries */
HDassert(cache_ptr->pl_len == 0);
- /* If the file is opened and closed without any access to
- * any group or dataset, it is possible that the cache image (if
- * it exists) has not been read yet. Do this now if required.
- */
- if(cache_ptr->load_image) {
- cache_ptr->load_image = FALSE;
- if(H5C__load_cache_image(f, dxpl_id) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "Can't load cache image")
- } /* end if */
+ /* Prepare cache image */
+ if(H5C__prep_image_for_file_close(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create cache image")
- /* Generate the cache image, if requested */
- if(cache_ptr->image_ctl.generate_image)
- if(H5C__prep_image_for_file_close(f, dxpl_id) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "Can't create cache image")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -1096,36 +1090,20 @@ H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
break;
case H5C_RING_RDFSM:
- if(!cache_ptr->rdfsm_settled) {
- hbool_t fsm_settled = FALSE; /* Whether the FSM was actually settled */
-
- /* Settle raw data FSM */
- if(H5MF_settle_raw_data_fsm(f, dxpl_id, &fsm_settled) < 0)
+ /* Settle raw data FSM */
+ if(!cache_ptr->rdfsm_settled)
+ if(H5MF_settle_raw_data_fsm(f, dxpl_id, &cache_ptr->rdfsm_settled) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed")
-
- /* Only set the flag if the FSM was actually settled */
- if(fsm_settled)
- cache_ptr->rdfsm_settled = TRUE;
- } /* end if */
break;
case H5C_RING_MDFSM:
- if(!cache_ptr->mdfsm_settled) {
- hbool_t fsm_settled = FALSE; /* Whether the FSM was actually settled */
-
- /* Settle metadata FSM */
- if(H5MF_settle_meta_data_fsm(f, dxpl_id, &fsm_settled) < 0)
+ /* Settle metadata FSM */
+ if(!cache_ptr->mdfsm_settled)
+ if(H5MF_settle_meta_data_fsm(f, dxpl_id, &cache_ptr->mdfsm_settled) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed")
-
- /* Only set the flag if the FSM was actually settled */
- if(fsm_settled)
- cache_ptr->mdfsm_settled = TRUE;
- } /* end if */
break;
case H5C_RING_SBE:
- break;
-
case H5C_RING_SB:
break;
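
(The simplification above relies on H5MF_settle_raw_data_fsm() and H5MF_settle_meta_data_fsm() only setting their out parameter to TRUE when the FSM was actually settled, which lets the caller pass the persistent flag's address directly instead of copying through a temporary. A minimal sketch of the pattern, with hypothetical names settle_fsm and flush_ring_sketch, assuming the callee never clears the flag:

    #include <stdbool.h>

    /* Hypothetical settle routine: returns -1 on error, 0 on success.
     * Sets *settled only when the FSM was actually settled; it never
     * resets an already-true flag back to false.
     */
    static int
    settle_fsm(bool *settled)
    {
        bool did_work = false;

        /* ... attempt to settle the free space manager ... */
        did_work = true;            /* assume the settle succeeded */

        if (did_work)
            *settled = true;        /* only ever set, never cleared */
        return 0;
    }

    static int
    flush_ring_sketch(bool *rdfsm_settled)
    {
        /* No local temporary needed: the callee only writes TRUE on
         * an actual settle, so the persistent flag is passed directly.
         */
        if (!*rdfsm_settled)
            if (settle_fsm(rdfsm_settled) < 0)
                return -1;
        return 0;
    }

If the settle routine could clear the flag, the original temporary-plus-copy form would be required.)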
@@ -5310,7 +5288,7 @@ H5C_flush_invalidate_ring(H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
H5C_cache_entry_t *entry_ptr = NULL;
H5C_cache_entry_t *next_entry_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
- int64_t initial_slist_len = 0;
+ uint32_t initial_slist_len = 0;
size_t initial_slist_size = 0;
#endif /* H5C_DO_SANITY_CHECKS */
herr_t ret_value = SUCCEED;
@@ -5544,8 +5522,8 @@ H5C_flush_invalidate_ring(H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
*/
if(node_ptr == NULL) {
- HDassert(cache_ptr->slist_len == (initial_slist_len + cache_ptr->slist_len_increase));
- HDassert((int64_t)cache_ptr->slist_size == ((int64_t)initial_slist_size + cache_ptr->slist_size_increase));
+ HDassert(cache_ptr->slist_len == (uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase));
+ HDassert(cache_ptr->slist_size == (size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase));
} /* end if */
#endif /* H5C_DO_SANITY_CHECKS */
@@ -5747,7 +5725,7 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
H5C_cache_entry_t * entry_ptr = NULL;
H5C_cache_entry_t * next_entry_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
- int64_t initial_slist_len = 0;
+ uint32_t initial_slist_len = 0;
size_t initial_slist_size = 0;
#endif /* H5C_DO_SANITY_CHECKS */
int i;
@@ -5952,8 +5930,8 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
#if H5C_DO_SANITY_CHECKS
/* Verify that the slist size and length are as expected. */
- HDassert((initial_slist_len + cache_ptr->slist_len_increase) == cache_ptr->slist_len);
- HDassert((size_t)((int64_t)initial_slist_size + cache_ptr->slist_size_increase) == cache_ptr->slist_size);
+ HDassert((uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase) == cache_ptr->slist_len);
+ HDassert((size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase) == cache_ptr->slist_size);
#endif /* H5C_DO_SANITY_CHECKS */
} /* while */
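
(The pattern behind these assertions is an unsigned running count paired with a signed delta that flush callbacks accumulate during the scan; the explicit casts reconcile the two types. A self-contained sketch with hypothetical names slist_sketch_t and check_slist_bookkeeping; ssize_t is POSIX:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <sys/types.h>   /* ssize_t (POSIX) */

    typedef struct {
        uint32_t slist_len;           /* current length (unsigned)      */
        size_t   slist_size;          /* current size in bytes          */
        int32_t  slist_len_increase;  /* net insertions minus deletions */
        ssize_t  slist_size_increase; /* net byte change (may be < 0)   */
    } slist_sketch_t;

    static void
    check_slist_bookkeeping(const slist_sketch_t *s, uint32_t initial_len,
        size_t initial_size)
    {
        /* Cast the unsigned baseline to signed, apply the delta, then
         * cast back for comparison -- mirroring the H5C assertions.
         */
        assert(s->slist_len ==
               (uint32_t)((int32_t)initial_len + s->slist_len_increase));
        assert(s->slist_size ==
               (size_t)((ssize_t)initial_size + s->slist_size_increase));
    }

This mirrors the move of slist_len from int64_t to uint32_t in the patch.)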
@@ -7996,6 +7974,515 @@ H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t * entry,
/*-------------------------------------------------------------------------
+ * Function: H5C__serialize_cache
+ *
+ * Purpose: Serialize (i.e. construct an on-disk image for) all
+ * entries in the metadata cache, including clean entries.
+ *
+ * Note that flush dependencies and "flush me last" flags
+ * must be observed in the serialization process.
+ *
+ * Note also that entries may be loaded, flushed, evicted,
+ * expunged, relocated, resized, or removed from the cache
+ * during this process, just as these actions may occur during
+ * a regular flush.
+ *
+ * However, we are given that the cache will contain no protected
+ * entries on entry to this routine (although entries may be
+ * briefly protected and then unprotected during the serialize
+ * process).
+ *
+ * The objective of this routine is to serialize all entries and
+ * force them into their actual locations on disk.
+ *
+ * The initial need for this routine is to settle all entries
+ * in the cache prior to construction of the metadata cache
+ * image so that the size of the cache image can be calculated.
+ * However, I gather that other uses for the routine are
+ * under consideration.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 7/22/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__serialize_cache(H5F_t *f, hid_t dxpl_id)
+{
+#if H5C_DO_SANITY_CHECKS
+ int i;
+ uint32_t index_len = 0;
+ size_t index_size = (size_t)0;
+ size_t clean_index_size = (size_t)0;
+ size_t dirty_index_size = (size_t)0;
+ size_t slist_size = (size_t)0;
+ uint32_t slist_len = 0;
+#endif /* H5C_DO_SANITY_CHECKS */
+ H5C_ring_t ring;
+ H5C_t * cache_ptr;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
+
+#if H5C_DO_SANITY_CHECKS
+ HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+
+ for(i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
+ index_len += cache_ptr->index_ring_len[i];
+ index_size += cache_ptr->index_ring_size[i];
+ clean_index_size += cache_ptr->clean_index_ring_size[i];
+ dirty_index_size += cache_ptr->dirty_index_ring_size[i];
+
+ slist_len += cache_ptr->slist_ring_len[i];
+ slist_size += cache_ptr->slist_ring_size[i];
+ } /* end for */
+
+ HDassert(cache_ptr->index_len == index_len);
+ HDassert(cache_ptr->index_size == index_size);
+ HDassert(cache_ptr->clean_index_size == clean_index_size);
+ HDassert(cache_ptr->dirty_index_size == dirty_index_size);
+ HDassert(cache_ptr->slist_len == slist_len);
+ HDassert(cache_ptr->slist_size == slist_size);
+#endif /* H5C_DO_SANITY_CHECKS */
+
+#if H5C_DO_EXTREME_SANITY_CHECKS
+ if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_lru_list(cache_ptr) < 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+#ifndef NDEBUG
+ /* If this is a debug build, set the serialization_count field of
+ * each entry in the cache to zero before we start the serialization.
+ * This allows us to detect the case in which any entry is serialized
+ * more than once (a performance issue) and, more importantly, the
+ * case in which any flush dependency parent is serialized more than
+ * once (a correctness issue).
+ */
+ {
+ H5C_cache_entry_t * scan_ptr = NULL;
+
+ scan_ptr = cache_ptr->il_head;
+ while(scan_ptr != NULL) {
+ HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ scan_ptr->serialization_count = 0;
+ scan_ptr = scan_ptr->il_next;
+ } /* end while */
+ } /* end block */
+#endif /* NDEBUG */
+
+ /* set cache_ptr->serialization_in_progress to TRUE, and back
+ * to FALSE at the end of the function. Must maintain this flag
+ * to support H5C_get_serialization_in_progress(), which is in
+ * turn required to support sanity checking in some cache
+ * clients.
+ */
+ HDassert(!cache_ptr->serialization_in_progress);
+ cache_ptr->serialization_in_progress = TRUE;
+
+ /* Serialize each ring, starting from the outermost ring and
+ * working inward.
+ */
+ ring = H5C_RING_USER;
+ while(ring < H5C_RING_NTYPES) {
+ HDassert(cache_ptr->close_warning_received);
+ switch(ring) {
+ case H5C_RING_USER:
+ break;
+
+ case H5C_RING_RDFSM:
+ /* Settle raw data FSM */
+ if(!cache_ptr->rdfsm_settled)
+ if(H5MF_settle_raw_data_fsm(f, dxpl_id, &cache_ptr->rdfsm_settled) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed")
+ break;
+
+ case H5C_RING_MDFSM:
+ /* Settle metadata FSM */
+ if(!cache_ptr->mdfsm_settled)
+ if(H5MF_settle_meta_data_fsm(f, dxpl_id, &cache_ptr->mdfsm_settled) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed")
+ break;
+
+ case H5C_RING_SBE:
+ case H5C_RING_SB:
+ break;
+
+ default:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown ring?!?!")
+ break;
+ } /* end switch */
+
+ if(H5C__serialize_ring(f, dxpl_id, ring) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "serialize ring failed")
+
+ ring++;
+ } /* end while */
+
+#ifndef NDEBUG
+ /* Verify that no entry has been serialized more than once.
+ * FD parents with multiple serializations should have been caught
+ * elsewhere, so no specific check for them here.
+ */
+ {
+ H5C_cache_entry_t * scan_ptr = NULL;
+
+ scan_ptr = cache_ptr->il_head;
+ while(scan_ptr != NULL) {
+ HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(scan_ptr->serialization_count <= 1);
+
+ scan_ptr = scan_ptr->il_next;
+ } /* end while */
+ } /* end block */
+#endif /* NDEBUG */
+
+done:
+ cache_ptr->serialization_in_progress = FALSE;
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__serialize_cache() */
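
(As the comment above notes, the serialization_in_progress flag must be accurate even on the error path, so it is cleared unconditionally after the done: label. A reduced sketch of that control flow, with hypothetical names cache_sketch_t and serialize_one_ring, and plain C in place of the FUNC_ENTER/HGOTO_ERROR machinery:

    #include <assert.h>
    #include <stdbool.h>

    typedef struct {
        bool serialization_in_progress;
    } cache_sketch_t;

    static int
    serialize_one_ring(cache_sketch_t *cache, int ring)
    {
        (void)cache;
        (void)ring;
        return 0;   /* stub for the sketch */
    }

    static int
    serialize_cache_sketch(cache_sketch_t *cache, int n_rings)
    {
        int ring;
        int ret_value = 0;

        assert(!cache->serialization_in_progress);
        cache->serialization_in_progress = true;

        for (ring = 0; ring < n_rings; ring++)   /* outermost first */
            if (serialize_one_ring(cache, ring) < 0) {
                ret_value = -1;
                goto done;      /* error path still clears the flag */
            }

    done:
        cache->serialization_in_progress = false;
        return ret_value;
    }

Clearing the flag on the done: path is what keeps H5C_get_serialization_in_progress() truthful for client sanity checks even after a failed serialize.)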
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__serialize_ring
+ *
+ * Purpose: Serialize the entries contained in the specified cache and
+ * ring. All entries in rings outside the specified ring
+ * must have been serialized on entry.
+ *
+ * If the cache contains protected entries in the specified
+ * ring, the function will fail, as protected entries cannot
+ * be serialized. However, all unprotected entries in the
+ * target ring should be serialized before the function
+ * returns failure.
+ *
+ * If flush dependencies appear in the target ring, the
+ * function makes repeated passes through the index list
+ * serializing entries in flush dependency order.
+ *
+ * All entries outside the H5C_RING_SBE are marked for
+ * inclusion in the cache image. Entries in H5C_RING_SBE
+ * and below are marked for exclusion from the image.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 9/11/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__serialize_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring)
+{
+ hbool_t done = FALSE;
+ H5C_t * cache_ptr;
+ H5C_cache_entry_t * entry_ptr;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(ring > H5C_RING_UNDEFINED);
+ HDassert(ring < H5C_RING_NTYPES);
+
+ HDassert(cache_ptr->serialization_in_progress);
+
+ /* The objective here is to serialize all entries in the cache ring
+ * in flush dependency order.
+ *
+ * The basic algorithm is to scan the cache index list looking for
+ * unserialized entries that are either not in a flush dependency
+ * relationship, or which have no unserialized children. Any such
+ * entry is serialized and its flush dependency parents (if any) are
+ * informed -- allowing them to decrement their unserialized child counts.
+ *
+ * However, this algorithm is complicated by the ability
+ * of client serialization callbacks to perform operations on
+ * on the cache which can result in the insertion, deletion,
+ * relocation, resize, dirty, flush, eviction, or removal (via the
+ * take ownership flag) of entries. Changes in the flush dependency
+ * structure are also possible.
+ *
+ * On the other hand, the algorithm is simplified by the fact that
+ * we are serializing, not flushing. Thus, as long as all entries
+ * are serialized correctly, it doesn't matter if we have to go back
+ * and serialize an entry a second time.
+ *
+ * These possible actions result in the following modifications to
+ * the basic algorithm:
+ *
+ * 1) In the event of an entry expunge, eviction or removal, we must
+ * restart the scan as it is possible that the next entry in our
+ * scan is no longer in the cache. Were we to examine this entry,
+ * we would be accessing deallocated memory.
+ *
+ * 2) A resize, dirty, or insertion of an entry may result in the
+ * increment of a flush dependency parent's dirty and/or
+ * unserialized child count. In the context of serializing the
+ * cache, this is a non-issue, as even if we have already
+ * serialized the parent, it will be marked dirty and its image
+ * marked out of date if appropriate when the child is serialized.
+ *
+ * However, this is a major issue for a flush, as were this to happen
+ * in a flush, it would violate the invariant that the flush dependency
+ * feature is intended to enforce. As the metadata cache has no
+ * control over the behavior of cache clients, it has no way of
+ * preventing this behavior. However, it should detect it if at all
+ * possible.
+ *
+ * Do this by maintaining a count of the number of times each entry is
+ * serialized during a cache serialization. If any flush dependency
+ * parent is serialized more than once, throw an assertion failure.
+ *
+ * 3) An entry relocation will typically change the location of the
+ * entry in the index list. This shouldn't cause problems as we
+ * will scan the index list until we make a complete pass without
+ * finding anything to serialize -- making relocations of either
+ * the current or next entries irrelevant.
+ *
+ * Note that since a relocation may result in our skipping part of
+ * the index list, we must always do at least one more pass through
+ * the index list after an entry relocation.
+ *
+ * 4) Changes in the flush dependency structure are possible on
+ * entry insertion, load, expunge, evict, or remove. Destruction
+ * of a flush dependency has no effect, as it can only relax the
+ * flush dependencies. Creation of a flush dependency can create
+ * an unserialized child of a flush dependency parent where all
+ * flush dependency children were previously serialized. Should
+ * this child dirty the flush dependency parent when it is serialized,
+ * the parent will be re-serialized.
+ *
+ * Per the discussion of 2) above, this is a non-issue for cache
+ * serialization, and a major problem for cache flush. Using the
+ * same detection mechanism, throw an assertion failure if this
+ * condition appears.
+ *
+ * Observe that either eviction or removal of entries as a result of
+ * a serialization is not a problem as long as the flush dependency
+ * tree does not change beyond the removal of a leaf.
+ */
+ while(!done) {
+ /* Reset the counters so that we can detect insertions, loads,
+ * moves, and flush dependency height changes caused by the pre_serialize
+ * and serialize callbacks.
+ */
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+
+ done = TRUE; /* set to FALSE if any activity in inner loop */
+ entry_ptr = cache_ptr->il_head;
+ while(entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+
+ /* Verify that either the entry is already serialized, or
+ * that it is assigned to either the target or an inner
+ * ring.
+ */
+ HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
+
+ /* Skip flush me last entries or inner ring entries */
+ if(!entry_ptr->flush_me_last && entry_ptr->ring == ring) {
+
+ /* if we encounter an unserialized entry in the current
+ * ring that is not marked flush me last, we are not done.
+ */
+ if(!entry_ptr->image_up_to_date)
+ done = FALSE;
+
+ /* Serialize the entry if its image is not up to date
+ * and it has no unserialized flush dependency children.
+ */
+ if(!entry_ptr->image_up_to_date && entry_ptr->flush_dep_nunser_children == 0) {
+ HDassert(entry_ptr->serialization_count == 0);
+
+ /* Serialize the entry */
+ if(H5C__serialize_single_entry(f, dxpl_id, cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
+
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ HDassert(entry_ptr->serialization_count == 0);
+
+#ifndef NDEBUG
+ /* Increment serialization counter (to detect multiple serializations) */
+ entry_ptr->serialization_count++;
+#endif /* NDEBUG */
+ } /* end if */
+ } /* end if */
+
+ /* Check for the cache being perturbed during the entry serialize */
+ if((cache_ptr->entries_loaded_counter > 0) ||
+ (cache_ptr->entries_inserted_counter > 0) ||
+ (cache_ptr->entries_relocated_counter > 0)) {
+
+#if H5C_COLLECT_CACHE_STATS
+ H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr);
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ /* Reset the counters */
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+
+ /* Restart scan */
+ entry_ptr = cache_ptr->il_head;
+ } /* end if */
+ else
+ /* Advance to next entry */
+ entry_ptr = entry_ptr->il_next;
+ } /* while ( entry_ptr != NULL ) */
+ } /* while ( ! done ) */
+
+
+ /* Reset the counters so that we can detect insertions, loads,
+ * moves, and flush dependency height changes caused by the pre_serialize
+ * and serialize callbacks.
+ */
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+
+ /* At this point, all entries not marked "flush me last" and in
+ * the current ring or outside it should be serialized and have up
+ * to date images. Scan the index list again to serialize the
+ * "flush me last" entries (if they are in the current ring) and to
+ * verify that all other entries have up to date images.
+ */
+ entry_ptr = cache_ptr->il_head;
+ while(entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring > H5C_RING_UNDEFINED);
+ HDassert(entry_ptr->ring < H5C_RING_NTYPES);
+ HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
+
+ if(entry_ptr->ring == ring) {
+ if(entry_ptr->flush_me_last) {
+ if(!entry_ptr->image_up_to_date) {
+ HDassert(entry_ptr->serialization_count == 0);
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+
+ /* Serialize the entry */
+ if(H5C__serialize_single_entry(f, dxpl_id, cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
+
+ /* Check for the cache changing */
+ if((cache_ptr->entries_loaded_counter > 0) ||
+ (cache_ptr->entries_inserted_counter > 0) ||
+ (cache_ptr->entries_relocated_counter > 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flush_me_last entry serialization triggered restart")
+
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ HDassert(entry_ptr->serialization_count == 0);
+#ifndef NDEBUG
+ /* Increment serialization counter (to detect multiple serializations) */
+ entry_ptr->serialization_count++;
+#endif /* NDEBUG */
+ } /* end if */
+ } /* end if */
+ else {
+ HDassert(entry_ptr->image_up_to_date);
+ HDassert(entry_ptr->serialization_count <= 1);
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ } /* end else */
+ } /* if ( entry_ptr->ring == ring ) */
+
+ entry_ptr = entry_ptr->il_next;
+ } /* while ( entry_ptr != NULL ) */
+
+done:
+ HDassert(cache_ptr->serialization_in_progress);
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__serialize_ring() */
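
(The restart logic above generalizes to any walk over an intrusive list whose membership can change under the iterator: zero a set of modification counters, scan, and restart from the head whenever a counter moved, repeating until a full pass finds nothing left to do. A minimal sketch with hypothetical names; the real code tracks loads, insertions, and relocations in separate counters:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct entry_sketch {
        struct entry_sketch *next;   /* intrusive index-list link */
        bool image_up_to_date;       /* already serialized?       */
    } entry_sketch_t;

    typedef struct {
        entry_sketch_t *head;
        unsigned mod_counter;        /* bumped on insert/load/move */
    } index_list_sketch_t;

    static int
    serialize_entry(index_list_sketch_t *list, entry_sketch_t *entry)
    {
        (void)list;
        entry->image_up_to_date = true;   /* stub: mark serialized */
        return 0;
    }

    static int
    serialize_all(index_list_sketch_t *list)
    {
        bool done = false;

        while (!done) {
            entry_sketch_t *entry = list->head;

            done = true;                  /* falsified by any activity */
            list->mod_counter = 0;
            while (entry != NULL) {
                if (!entry->image_up_to_date) {
                    done = false;
                    if (serialize_entry(list, entry) < 0)
                        return -1;
                }
                if (list->mod_counter > 0) {
                    list->mod_counter = 0;
                    entry = list->head;   /* restart the scan */
                }
                else
                    entry = entry->next;
            }
        }
        return 0;
    }

Because serialization (unlike flushing) is idempotent, re-visiting already-serialized entries after a restart costs time but not correctness.)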
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__serialize_single_entry
+ *
+ * Purpose: Serialize the cache entry pointed to by the entry_ptr
+ * parameter.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer, 7/24/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__serialize_single_entry(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr,
+ H5C_cache_entry_t *entry_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(!entry_ptr->prefetched);
+ HDassert(!entry_ptr->image_up_to_date);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(!entry_ptr->is_protected);
+ HDassert(!entry_ptr->flush_in_progress);
+ HDassert(entry_ptr->type);
+
+ /* Set entry_ptr->flush_in_progress to TRUE so that the target entry
+ * will not be evicted out from under us. Must set it back to FALSE
+ * when we are done.
+ */
+ entry_ptr->flush_in_progress = TRUE;
+
+ /* Allocate buffer for the entry image if required. */
+ if(NULL == entry_ptr->image_ptr) {
+ HDassert(entry_ptr->size > 0);
+ if(NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)) )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
+#if H5C_DO_MEMORY_SANITY_CHECKS
+ HDmemcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+ } /* end if */
+
+ /* Generate image for entry */
+ if(H5C__generate_image(f, cache_ptr, entry_ptr, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "Can't generate image for cache entry")
+
+ /* Reset the flush_in_progress flag */
+ entry_ptr->flush_in_progress = FALSE;
+
+done:
+ HDassert((ret_value != SUCCEED) || (!entry_ptr->flush_in_progress));
+ HDassert((ret_value != SUCCEED) || (entry_ptr->image_up_to_date));
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__serialize_single_entry() */
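
(The H5C_DO_MEMORY_SANITY_CHECKS memcpy above plants a sentinel just past the entry image so that a serialize callback that overruns the buffer can be caught later. A small self-contained sketch of the technique, using hypothetical names and an arbitrary 8-byte sentinel rather than the real H5C_IMAGE_SANITY_VALUE:

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    #define EXTRA_SPACE 8
    static const unsigned char SANITY_VALUE[EXTRA_SPACE] =
        {0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef};

    /* Allocate an image buffer with a trailing sentinel */
    static void *
    image_alloc(size_t size)
    {
        unsigned char *buf = malloc(size + EXTRA_SPACE);

        if (buf != NULL)
            memcpy(buf + size, SANITY_VALUE, EXTRA_SPACE);
        return buf;
    }

    /* Verify the sentinel is intact after the serializer ran: a
     * callback that wrote past the image corrupts these bytes.
     */
    static void
    image_check(const void *image, size_t size)
    {
        assert(0 == memcmp((const unsigned char *)image + size,
                           SANITY_VALUE, EXTRA_SPACE));
    }

This also shows why the sentinel must be copied at the entry's size, not at an unrelated offset, as fixed in the HDmemcpy line above.)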
+
+
+/*-------------------------------------------------------------------------
* Function: H5C__generate_image
*
* Purpose: Serialize an entry and generate its image.
diff --git a/src/H5Cdbg.c b/src/H5Cdbg.c
index 205567f..eb5f123 100644
--- a/src/H5Cdbg.c
+++ b/src/H5Cdbg.c
@@ -54,10 +54,6 @@
/* Local Prototypes */
/********************/
-#if 0 /* debugging routines */
-herr_t H5C_dump_cache_skip_list(H5C_t *cache_ptr, char *calling_fcn);
-#endif /* debugging routines */
-
/*********************/
/* Package Variables */
@@ -195,7 +191,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-#if 0 /* debugging routine */
+#ifndef NDEBUG
herr_t
H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn)
{
@@ -204,7 +200,7 @@ H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn)
H5C_cache_entry_t * entry_ptr = NULL;
H5SL_node_t * node_ptr = NULL;
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_NOAPI_NOERR
HDassert(cache_ptr != NULL);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
@@ -258,10 +254,9 @@ H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn)
HDfprintf(stdout, "\n\n");
-done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_dump_cache_skip_list() */
-#endif /* debugging routine */
+#endif /* NDEBUG */
/*-------------------------------------------------------------------------
@@ -1325,11 +1320,12 @@ H5C_cache_is_clean(const H5C_t *cache_ptr, H5C_ring_t inner_ring)
while(ring <= inner_ring) {
if(cache_ptr->dirty_index_ring_size[ring] > 0)
- ret_value = FALSE;
+ HGOTO_DONE(FALSE)
ring++;
} /* end while */
+done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_cache_is_clean() */
#endif /* NDEBUG */
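
(The HGOTO_DONE change above ends the ring scan at the first dirty ring instead of recording the result and continuing. In essence the macro sets ret_value and jumps to done:, as in this sketch -- a paraphrase of the HDF5-internal macro, not its exact H5private.h definition:

    #include <stdbool.h>
    #include <stddef.h>

    /* Approximation of the HDF5-internal macro */
    #define HGOTO_DONE(ret) {ret_value = (ret); goto done;}

    static bool
    cache_is_clean_sketch(const size_t dirty_ring_size[], int inner_ring)
    {
        int  ring = 0;
        bool ret_value = true;

        while (ring <= inner_ring) {
            if (dirty_ring_size[ring] > 0)
                HGOTO_DONE(false)     /* stop at the first dirty ring */
            ring++;
        }

    done:
        return ret_value;
    }

Functionally equivalent to the old code, but the early exit avoids scanning the remaining rings once the answer is known.)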
diff --git a/src/H5Cimage.c b/src/H5Cimage.c
index 5374b41..1da2545 100644
--- a/src/H5Cimage.c
+++ b/src/H5Cimage.c
@@ -120,11 +120,6 @@ static herr_t H5C__reconstruct_cache_contents(H5F_t *f, hid_t dxpl_id,
H5C_t *cache_ptr);
static H5C_cache_entry_t *H5C__reconstruct_cache_entry(const H5F_t *f,
H5C_t *cache_ptr, const uint8_t **buf);
-static herr_t H5C__serialize_cache(H5F_t *f, hid_t dxpl_id);
-static herr_t H5C__serialize_ring(H5F_t *f, hid_t dxpl_id,
- H5C_ring_t ring);
-static herr_t H5C__serialize_single_entry(H5F_t *f, hid_t dxpl_id,
- H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr);
static herr_t H5C__write_cache_image_superblock_msg(H5F_t *f, hid_t dxpl_id,
hbool_t create);
static herr_t H5C__read_cache_image(H5F_t * f, hid_t dxpl_id, const H5C_t *cache_ptr);
@@ -1257,186 +1252,199 @@ H5C__prep_image_for_file_close(H5F_t *f, hid_t dxpl_id)
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- /* Create the cache image super block extension message.
- *
- * Note that the base address and length of the metadata cache
- * image are undefined at this point, and thus will have to be
- * updated later.
- *
- * Create the super block extension message now so that space
- * is allocated for it (if necessary) before we allocate space
- * for the cache image block.
- *
- * To simplify testing, do this only if the
- * H5C_CI__GEN_MDCI_SBE_MESG bit is set in
- * cache_ptr->image_ctl.flags.
+ /* If the file is opened and closed without any access to
+ * any group or dataset, it is possible that the cache image (if
+ * it exists) has not been read yet. Do this now if required.
*/
- if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDCI_SBE_MESG)
- if(H5C__write_cache_image_superblock_msg(f, dxpl_id, TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "creation of cache image SB mesg failed.")
+ if(cache_ptr->load_image) {
+ cache_ptr->load_image = FALSE;
+ if(H5C__load_cache_image(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "can't load cache image")
+ } /* end if */
- /* Serialize the cache */
- if(H5C__serialize_cache(f, dxpl_id) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "serialization of the cache failed")
+ /* Generate the cache image, if requested */
+ if(cache_ptr->image_ctl.generate_image) {
+ /* Create the cache image super block extension message.
+ *
+ * Note that the base address and length of the metadata cache
+ * image are undefined at this point, and thus will have to be
+ * updated later.
+ *
+ * Create the super block extension message now so that space
+ * is allocated for it (if necessary) before we allocate space
+ * for the cache image block.
+ *
+ * To simplify testing, do this only if the
+ * H5C_CI__GEN_MDCI_SBE_MESG bit is set in
+ * cache_ptr->image_ctl.flags.
+ */
+ if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDCI_SBE_MESG)
+ if(H5C__write_cache_image_superblock_msg(f, dxpl_id, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "creation of cache image SB mesg failed.")
- /* Scan the cache and record data needed to construct the
- * cache image. In particular, for each entry we must record:
- *
- * 1) rank in LRU (if entry is in LRU)
- *
- * 2) Whether the entry is dirty prior to flush of
- * cache just prior to close.
- *
- * 3) Addresses of flush dependency parents (if any).
- *
- * 4) Number of flush dependency children (if any).
- *
- * In passing, also compute the size of the metadata cache
- * image. With the recent modifications of the free space
- * manager code, this size should be correct.
- */
- if(H5C__prep_for_file_close__scan_entries(f, cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C__prep_for_file_close__scan_entries failed")
- HDassert(HADDR_UNDEF == cache_ptr->image_addr);
+ /* Serialize the cache */
+ if(H5C__serialize_cache(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "serialization of the cache failed")
+
+ /* Scan the cache and record data needed to construct the
+ * cache image. In particular, for each entry we must record:
+ *
+ * 1) rank in LRU (if entry is in LRU)
+ *
+ * 2) Whether the entry is dirty prior to flush of
+ * cache just prior to close.
+ *
+ * 3) Addresses of flush dependency parents (if any).
+ *
+ * 4) Number of flush dependency children (if any).
+ *
+ * In passing, also compute the size of the metadata cache
+ * image. With the recent modifications of the free space
+ * manager code, this size should be correct.
+ */
+ if(H5C__prep_for_file_close__scan_entries(f, cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C__prep_for_file_close__scan_entries failed")
+ HDassert(HADDR_UNDEF == cache_ptr->image_addr);
#ifdef H5_HAVE_PARALLEL
- /* In the parallel case, overwrite the image_len with the
- * value computed by process 0.
- */
- if(cache_ptr->aux_ptr) { /* we have multiple processes */
- int mpi_result;
- unsigned p0_image_len;
- H5AC_aux_t * aux_ptr;
+ /* In the parallel case, overwrite the image_len with the
+ * value computed by process 0.
+ */
+ if(cache_ptr->aux_ptr) { /* we have multiple processes */
+ int mpi_result;
+ unsigned p0_image_len;
+ H5AC_aux_t * aux_ptr;
- aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr;
- if(aux_ptr->mpi_rank == 0) {
- aux_ptr->p0_image_len = (unsigned)cache_ptr->image_data_len;
- p0_image_len = aux_ptr->p0_image_len;
+ aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr;
+ if(aux_ptr->mpi_rank == 0) {
+ aux_ptr->p0_image_len = (unsigned)cache_ptr->image_data_len;
+ p0_image_len = aux_ptr->p0_image_len;
- if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&p0_image_len, 1, MPI_UNSIGNED, 0, aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&p0_image_len, 1, MPI_UNSIGNED, 0, aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
- HDassert(p0_image_len == aux_ptr->p0_image_len);
+ HDassert(p0_image_len == aux_ptr->p0_image_len);
+ } /* end if */
+ else {
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&p0_image_len, 1, MPI_UNSIGNED, 0, aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
+
+ aux_ptr->p0_image_len = p0_image_len;
+ } /* end else */
+
+ /* Allocate space for a cache image of size equal to that
+ * computed by the process 0. This may be different from
+ * cache_ptr->image_data_len if mpi_rank != 0. However, since
+ * cache image write is suppressed on all processes other than
+ * process 0, this doesn't matter.
+ *
+ * Note that we allocate the cache image directly from the file
+ * driver so as to avoid unsettling the free space managers.
+ */
+ if(HADDR_UNDEF == (cache_ptr->image_addr = H5FD_alloc(f->shared->lf, dxpl_id, H5FD_MEM_SUPER, f,
+ (hsize_t)p0_image_len, &eoa_frag_addr, &eoa_frag_size)))
+ HGOTO_ERROR(H5E_CACHE, H5E_NOSPACE, FAIL, "can't allocate file space for metadata cache image")
} /* end if */
- else {
- if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&p0_image_len, 1, MPI_UNSIGNED, 0, aux_ptr->mpi_comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
-
- aux_ptr->p0_image_len = p0_image_len;
- } /* end else */
+ else
+#endif /* H5_HAVE_PARALLEL */
+ /* Allocate the cache image block. Note that we allocate this
+ * this space directly from the file driver so as to avoid
+ * unsettling the free space managers.
+ */
+ if(HADDR_UNDEF == (cache_ptr->image_addr = H5FD_alloc(f->shared->lf, dxpl_id, H5FD_MEM_SUPER, f,
+ (hsize_t)(cache_ptr->image_data_len), &eoa_frag_addr, &eoa_frag_size)))
+ HGOTO_ERROR(H5E_CACHE, H5E_NOSPACE, FAIL, "can't allocate file space for metadata cache image")
- /* Allocate space for a cache image of size equal to that
- * computed by the process 0. This may be different from
- * cache_ptr->image_data_len if mpi_rank != 0. However, since
- * cache image write is suppressed on all processes other than
- * process 0, this doesn't matter.
+ /* For now, drop any fragment left over from the allocation of the
+ * image block on the ground. A fragment should only be returned
+ * if the underlying file alignment is greater than 1.
*
- * Note that we allocate the cache image directly from the file
- * driver so as to avoid unsettling the free space managers.
+ * Clean this up eventually by extending the size of the cache
+ * image block to the next alignment boundary, and then setting
+ * the image_data_len to the actual size of the cache_image.
+ *
+ * On the off chance that there is some other way to get
+ * a fragment on a cache image allocation, leave the following
+ * assertion in the code so we will find out.
*/
- if(HADDR_UNDEF == (cache_ptr->image_addr = H5FD_alloc(f->shared->lf, dxpl_id, H5FD_MEM_SUPER, f,
- (hsize_t)p0_image_len, &eoa_frag_addr, &eoa_frag_size)))
- HGOTO_ERROR(H5E_CACHE, H5E_NOSPACE, FAIL, "can't allocate file space for metadata cache image")
- } /* end if */
- else
-#endif /* H5_HAVE_PARALLEL */
- /* Allocate the cache image block. Note that we allocate this
- * this space directly from the file driver so as to avoid
- * unsettling the free space managers.
+ HDassert((eoa_frag_size == 0) || (f->shared->alignment != 1));
+
+ /* Eventually it will be possible for the length of the cache image
+ * block on file to be greater than the size of the data it
+ * contains. However, for now they must be the same. Set
+ * cache_ptr->image_len accordingly.
*/
- if(HADDR_UNDEF == (cache_ptr->image_addr = H5FD_alloc(f->shared->lf, dxpl_id, H5FD_MEM_SUPER, f,
- (hsize_t)(cache_ptr->image_data_len), &eoa_frag_addr, &eoa_frag_size)))
- HGOTO_ERROR(H5E_CACHE, H5E_NOSPACE, FAIL, "can't allocate file space for metadata cache image")
+ cache_ptr->image_len = cache_ptr->image_data_len;
- /* For now, drop any fragment left over from the allocation of the
- * image block on the ground. A fragment should only be returned
- * if the underlying file alignment is greater than 1.
- *
- * Clean this up eventually by extending the size of the cache
- * image block to the next alignement boundary, and then setting
- * the image_data_len to the actual size of the cache_image.
- *
- * On the off chance that there is some other way to get a
- * a fragment on a cache image allocation, leave the following
- * assertion in the code so we will find out.
- */
- HDassert((eoa_frag_size == 0) || (f->shared->alignment != 1));
+ /* update the metadata cache image superblock extension
+ * message with the new cache image block base address and
+ * length.
+ *
+ * to simplify testing, do this only if the
+ * H5C_CI__GEN_MDC_IMAGE_BLK bit is set in
+ * cache_ptr->image_ctl.flags.
+ */
+ if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDC_IMAGE_BLK)
+ if(H5C__write_cache_image_superblock_msg(f, dxpl_id, FALSE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "update of cache image SB mesg failed.")
- /* Eventually it will be possible for the length of the cache image
- * block on file to be greater than the size of the data it
- * contains. However, for now they must be the same. Set
- * cache_ptr->image_len accordingly.
- */
- cache_ptr->image_len = cache_ptr->image_data_len;
+ /* At this point:
+ *
+ * 1) space in the file for the metadata cache image
+ * is allocated,
+ *
+ * 2) the metadata cache image superblock extension
+ * message exists and (if so configured) contains
+ * the correct data,
+ *
+ * 3) All entries in the cache that will appear in the
+ * cache image are serialized with up to date images.
+ *
+ * Since we just updated the cache image message,
+ * the super block extension message is dirty. However,
+ * since the superblock and the superblock extension
+ * can't be included in the cache image, this is a non-
+ * issue.
+ *
+ * 4) All entries in the cache that will be included in
+ * the cache image are marked as such, and we have a count
+ * of same.
+ *
+ * 5) Flush dependency heights are calculated for all
+ * entries that will be included in the cache image.
+ *
+ * If there are any entries to be included in the metadata cache
+ * image, allocate, populate, and sort the image_entries array.
+ *
+ * If the metadata cache image will be empty, delete the
+ * metadata cache image superblock extension message, set
+ * cache_ptr->image_ctl.generate_image to FALSE. This will
+ * allow the file close to continue normally without the
+ * unnecessary generation of the metadata cache image.
+ */
+ if(cache_ptr->num_entries_in_image > 0) {
+ if(H5C__prep_for_file_close__setup_image_entries_array(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINIT, FAIL, "can't setup image entries array.")
- /* update the metadata cache image superblock extension
- * message with the new cache image block base address and
- * length.
- *
- * to simplify testing, do this only if the
- * H5C_CI__GEN_MDC_IMAGE_BLK bit is set in
- * cache_ptr->image_ctl.flags.
- */
- if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDC_IMAGE_BLK)
- if(H5C__write_cache_image_superblock_msg(f, dxpl_id, FALSE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "update of cache image SB mesg failed.")
+ /* Sort the entries */
+ HDqsort(cache_ptr->image_entries, (size_t)cache_ptr->num_entries_in_image,
+ sizeof(H5C_image_entry_t), H5C__image_entry_cmp);
+ } /* end if */
+ else { /* cancel creation of metadata cache image */
+ HDassert(cache_ptr->image_entries == NULL);
- /* At this point:
- *
- * 1) space in the file for the metadata cache image
- * is allocated,
- *
- * 2) the metadata cache image superblock extension
- * message exists and (if so configured) contains
- * the correct data,
- *
- * 3) All entries in the cache that will appear in the
- * cache image are serialized with up to date images.
- *
- * Since we just updated the cache image message,
- * the super block extension message is dirty. However,
- * since the superblock and the superblock extension
- * can't be included in the cache image, this is a non-
- * issue.
- *
- * 4) All entries in the cache that will be include in
- * the cache are marked as such, and we have a count
- * of same.
- *
- * 5) Flush dependency heights are calculated for all
- * entries that will be included in the cache image.
- *
- * If there are any entries to be included in the metadata cache
- * image, allocate, populate, and sort the image_entries array.
- *
- * If the metadata cache image will be empty, delete the
- * metadata cache image superblock extension message, set
- * cache_ptr->image_ctl.generate_image to FALSE. This will
- * allow the file close to continue normally without the
- * unecessary generation of the metadata cache image.
- */
- if(cache_ptr->num_entries_in_image > 0) {
- if(H5C__prep_for_file_close__setup_image_entries_array(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINIT, FAIL, "can't setup image entries array.")
+ /* To avoid breaking the control flow tests, only delete
+ * the mdci superblock extension message if the
+ * H5C_CI__GEN_MDC_IMAGE_BLK flag is set in
+ * cache_ptr->image_ctl.flags.
+ */
+ if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDC_IMAGE_BLK)
+ if(H5F_super_ext_remove_msg(f, dxpl_id, H5O_MDCI_MSG_ID) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove MDC image msg from superblock ext.")
- /* Sort the entries */
- HDqsort(cache_ptr->image_entries, (size_t)cache_ptr->num_entries_in_image,
- sizeof(H5C_image_entry_t), H5C__image_entry_cmp);
+ cache_ptr->image_ctl.generate_image = FALSE;
+ } /* end else */
} /* end if */
- else { /* cancel creation of metadata cache iamge */
- HDassert(cache_ptr->image_entries == NULL);
-
- /* To avoid breaking the control flow tests, only delete
- * the mdci superblock extension message if the
- * H5C_CI__GEN_MDC_IMAGE_BLK flag is set in
- * cache_ptr->image_ctl.flags.
- */
- if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDC_IMAGE_BLK)
- if(H5F_super_ext_remove_msg(f, dxpl_id, H5O_MDCI_MSG_ID) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove MDC image msg from superblock ext.")
-
- cache_ptr->image_ctl.generate_image = FALSE;
- } /* end else */
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -1706,7 +1714,7 @@ H5C__decode_cache_image_header(const H5F_t *f, H5C_t *cache_ptr,
actual_header_len = (size_t)(p - *buf);
expected_header_len = H5C__cache_image_block_header_size(f);
if(actual_header_len != expected_header_len)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad header image len.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad header image len")
/* Update buffer pointer */
*buf = p;
@@ -2560,24 +2568,15 @@ H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr)
HDassert(cache_ptr->image_entries == NULL);
/* Allocate and initialize image_entries array */
- if(NULL == (image_entries = (H5C_image_entry_t *)H5MM_malloc(sizeof(H5C_image_entry_t) * (size_t)(cache_ptr->num_entries_in_image + 1))))
+ if(NULL == (image_entries = (H5C_image_entry_t *)H5MM_calloc(sizeof(H5C_image_entry_t) * (size_t)(cache_ptr->num_entries_in_image + 1))))
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for image_entries")
+ /* Initialize (non-zero/NULL/FALSE) fields */
for(u = 0; u <= cache_ptr->num_entries_in_image; u++) {
image_entries[u].magic = H5C_IMAGE_ENTRY_T_MAGIC;
image_entries[u].addr = HADDR_UNDEF;
- image_entries[u].size = 0;
image_entries[u].ring = H5C_RING_UNDEFINED;
- image_entries[u].age = 0;
image_entries[u].type_id = -1;
- image_entries[u].lru_rank = 0;
- image_entries[u].is_dirty = FALSE;
- image_entries[u].image_fd_height = 0;
- image_entries[u].fd_parent_count = 0;
- image_entries[u].fd_parent_addrs = NULL;
- image_entries[u].fd_child_count = 0;
- image_entries[u].fd_dirty_child_count = 0;
- image_entries[u].image_ptr = NULL;
} /* end for */
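
(Switching from H5MM_malloc to H5MM_calloc lets the loop above initialize only the fields whose defaults are not zero/NULL/FALSE. A generic sketch of the same idiom with hypothetical names, image_entry_sketch_t and ADDR_UNDEF standing in for the real types and HADDR_UNDEF:

    #include <stdlib.h>

    #define ADDR_UNDEF ((unsigned long long)-1)

    typedef struct {
        unsigned long long addr;  /* default must be ADDR_UNDEF, not 0 */
        int    ring;              /* default must be -1, not 0         */
        int    type_id;           /* default must be -1, not 0         */
        size_t size;              /* zero default is already correct   */
        void  *image_ptr;         /* NULL default is already correct   */
    } image_entry_sketch_t;

    static image_entry_sketch_t *
    alloc_image_entries(size_t n)
    {
        /* calloc() zeroes the whole array, so only the fields whose
         * defaults are not zero/NULL need to be set explicitly.
         */
        image_entry_sketch_t *entries = calloc(n + 1, sizeof(*entries));
        size_t u;

        if (entries != NULL)
            for (u = 0; u <= n; u++) {
                entries[u].addr    = ADDR_UNDEF;
                entries[u].ring    = -1;
                entries[u].type_id = -1;
            }
        return entries;
    }

The extra (n + 1)th element mirrors the sentinel slot the real code allocates past num_entries_in_image.)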
/* Scan each entry on the index list and populate the image_entries array */
@@ -3146,6 +3145,7 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr,
hbool_t is_fd_child = FALSE;
#endif /* NDEBUG */ /* only used in assertions */
const uint8_t * p;
+ hbool_t file_is_rw;
H5C_cache_entry_t *ret_value = NULL; /* Return value */
FUNC_ENTER_STATIC
@@ -3156,13 +3156,16 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr,
HDassert(cache_ptr->num_entries_in_image > 0);
HDassert(buf && *buf);
- /* Get pointer to buffer */
- p = *buf;
+ /* Key R/W access off of whether the image will be deleted */
+ file_is_rw = cache_ptr->delete_image;
/* Allocate space for the prefetched cache entry */
if(NULL == (pf_entry_ptr = H5FL_CALLOC(H5C_cache_entry_t)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for prefetched cache entry")
+ /* Get pointer to buffer */
+ p = *buf;
+
/* Decode type id */
pf_entry_ptr->prefetch_type_id = *p++;
@@ -3185,7 +3188,7 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr,
* extension message and the cache image block will not be removed.
* Hence no danger in this.
*/
- pf_entry_ptr->is_dirty = (is_dirty && cache_ptr->delete_image);
+ pf_entry_ptr->is_dirty = (is_dirty && file_is_rw);
/* Decode ring */
pf_entry_ptr->ring = *p++;
@@ -3253,15 +3256,12 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr,
HDmemcpy(pf_entry_ptr->image_ptr, p, pf_entry_ptr->size);
p += pf_entry_ptr->size;
-
/* Initialize the rest of the fields in the prefetched entry */
/* (Only need to set non-zero/NULL/FALSE fields, due to calloc() above) */
pf_entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
pf_entry_ptr->cache_ptr = cache_ptr;
pf_entry_ptr->image_up_to_date = TRUE;
pf_entry_ptr->type = H5AC_PREFETCHED_ENTRY;
-
- /* Initialize cache image related fields */
pf_entry_ptr->prefetched = TRUE;
/* Sanity checks */
@@ -3281,529 +3281,6 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5C__serialize_cache
- *
- * Purpose: Serialize (i.e. construct an on disk image) for all entries
- * in the metadata cache including clean entries.
- *
- * Note that flush dependencies and "flush me last" flags
- * must be observed in the serialization process.
- *
- * Note also that entries may be loaded, flushed, evicted,
- * expunged, relocated, resized, or removed from the cache
- * during this process, just as these actions may occur during
- * a regular flush.
- *
- * However, we are given that the cache will contain no protected
- * entries on entry to this routine (although entries may be
- * briefly protected and then unprotected during the serialize
- * process).
- *
- * The objective of this routine is serialize all entries and
- * to force all entries into their actual locations on disk.
- *
- * The initial need for this routine is to settle all entries
- * in the cache prior to construction of the metadata cache
- * image so that the size of the cache image can be calculated.
- * However, I gather that other uses for the routine are
- * under consideration.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
- *
- * Programmer: John Mainzer
- * 7/22/15
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__serialize_cache(H5F_t *f, hid_t dxpl_id)
-{
-#if H5C_DO_SANITY_CHECKS
- int i;
- uint32_t index_len = 0;
- size_t index_size = (size_t)0;
- size_t clean_index_size = (size_t)0;
- size_t dirty_index_size = (size_t)0;
- size_t slist_size = (size_t)0;
- uint32_t slist_len = 0;
-#endif /* H5C_DO_SANITY_CHECKS */
- H5C_ring_t ring;
- H5C_t * cache_ptr;
- herr_t ret_value = SUCCEED;
-
- FUNC_ENTER_STATIC
-
- /* Sanity checks */
- HDassert(f);
- HDassert(f->shared);
- cache_ptr = f->shared->cache;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->slist_ptr);
-
-#if H5C_DO_SANITY_CHECKS
- HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
- HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
- HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
-
- for(i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
- index_len += cache_ptr->index_ring_len[i];
- index_size += cache_ptr->index_ring_size[i];
- clean_index_size += cache_ptr->clean_index_ring_size[i];
- dirty_index_size += cache_ptr->dirty_index_ring_size[i];
-
- slist_len += cache_ptr->slist_ring_len[i];
- slist_size += cache_ptr->slist_ring_size[i];
- } /* end for */
-
- HDassert(cache_ptr->index_len == index_len);
- HDassert(cache_ptr->index_size == index_size);
- HDassert(cache_ptr->clean_index_size == clean_index_size);
- HDassert(cache_ptr->dirty_index_size == dirty_index_size);
- HDassert(cache_ptr->slist_len == slist_len);
- HDassert(cache_ptr->slist_size == slist_size);
-#endif /* H5C_DO_SANITY_CHECKS */
-
-#if H5C_DO_EXTREME_SANITY_CHECKS
- if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
- (H5C_validate_lru_list(cache_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
-#ifndef NDEBUG
- /* if this is a debug build, set the serialization_count field of
- * each entry in the cache to zero before we start the serialization.
- * This allows us to detect the case in which any entry is serialized
- * more than once (a performance issues), and more importantly, the
- * case is which any flush depencency parent is serializes more than
- * once (a correctness issue).
- */
- {
- H5C_cache_entry_t * scan_ptr = NULL;
-
- scan_ptr = cache_ptr->il_head;
- while(scan_ptr != NULL) {
- HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- scan_ptr->serialization_count = 0;
- scan_ptr = scan_ptr->il_next;
- } /* end while */
- } /* end block */
-#endif /* NDEBUG */
-
- /* set cache_ptr->serialization_in_progress to TRUE, and back
- * to FALSE at the end of the function. Must maintain this flag
- * to support H5C_get_serialization_in_progress(), which is in
- * turn required to support sanity checking in some cache
- * clients.
- */
- HDassert(!cache_ptr->serialization_in_progress);
- cache_ptr->serialization_in_progress = TRUE;
-
- /* Serialize each ring, starting from the outermost ring and
- * working inward.
- */
- ring = H5C_RING_USER;
- while(ring < H5C_RING_NTYPES) {
- HDassert(cache_ptr->close_warning_received);
- switch(ring) {
- case H5C_RING_USER:
- break;
-
- case H5C_RING_RDFSM:
- if(!cache_ptr->rdfsm_settled) {
- hbool_t fsm_settled = FALSE; /* Whether the FSM was actually settled */
-
- /* Settle raw data FSM */
- if(H5MF_settle_raw_data_fsm(f, dxpl_id, &fsm_settled) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed")
-
- /* Only set the flag if the FSM was actually settled */
- if(fsm_settled)
- cache_ptr->rdfsm_settled = TRUE;
- } /* end if */
- break;
-
- case H5C_RING_MDFSM:
- if(!cache_ptr->mdfsm_settled) {
- hbool_t fsm_settled = FALSE; /* Whether the FSM was actually settled */
-
- /* Settle metadata FSM */
- if(H5MF_settle_meta_data_fsm(f, dxpl_id, &fsm_settled) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed")
-
- /* Only set the flag if the FSM was actually settled */
- if(fsm_settled)
- cache_ptr->mdfsm_settled = TRUE;
- } /* end if */
- break;
-
- case H5C_RING_SBE:
- case H5C_RING_SB:
- break;
-
- default:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown ring?!?!")
- break;
- } /* end switch */
-
- if(H5C__serialize_ring(f, dxpl_id, ring) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "serialize ring failed")
-
- ring++;
- } /* end while */
-
-#ifndef NDEBUG
- /* Verify that no entry has been serialized more than once.
- * FD parents with multiple serializations should have been caught
- * elsewhere, so no specific check for them here.
- */
- {
- H5C_cache_entry_t * scan_ptr = NULL;
-
- scan_ptr = cache_ptr->il_head;
- while(scan_ptr != NULL) {
- HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(scan_ptr->serialization_count <= 1);
-
- scan_ptr = scan_ptr->il_next;
- } /* end while */
- } /* end block */
-#endif /* NDEBUG */
-
-done:
- cache_ptr->serialization_in_progress = FALSE;
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__serialize_cache() */
-
-
-/*-------------------------------------------------------------------------
- * Function: H5C__serialize_ring
- *
- * Purpose: Serialize the entries contained in the specified cache and
- * ring. All entries in rings outside the specified ring
- * must have been serialized on entry.
- *
- * If the cache contains protected entries in the specified
- * ring, the function will fail, as protected entries cannot
- * be serialized. However all unprotected entries in the
- * target ring should be serialized before the function
- * returns failure.
- *
- * If flush dependencies appear in the target ring, the
- * function makes repeated passes through the index list
- * serializing entries in flush dependency order.
- *
- * All entries outside the H5C_RING_SBE are marked for
- * inclusion in the cache image. Entries in H5C_RING_SBE
- * and below are marked for exclusion from the image.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
- *
- * Programmer: John Mainzer
- * 9/11/15
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__serialize_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring)
-{
- hbool_t done = FALSE;
- H5C_t * cache_ptr;
- H5C_cache_entry_t * entry_ptr;
- herr_t ret_value = SUCCEED;
-
- FUNC_ENTER_STATIC
-
- /* Sanity checks */
- HDassert(f);
- HDassert(f->shared);
- cache_ptr = f->shared->cache;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(ring > H5C_RING_UNDEFINED);
- HDassert(ring < H5C_RING_NTYPES);
-
- HDassert(cache_ptr->serialization_in_progress);
-
- /* The objective here is to serialize all entries in the cache ring
- * in flush dependency order.
- *
- * The basic algorithm is to scan the cache index list looking for
- * unserialized entries that are either not in a flush dependency
- * relationship, or which have no unserialized children. Any such
- * entry is serialized and its flush dependency parents (if any) are
- * informed -- allowing them to decrement their userialized child counts.
- *
- * However, this algorithm is complicated by the ability
- * of client serialization callbacks to perform operations on
- * on the cache which can result in the insertion, deletion,
- * relocation, resize, dirty, flush, eviction, or removal (via the
- * take ownership flag) of entries. Changes in the flush dependency
- * structure are also possible.
- *
- * On the other hand, the algorithm is simplified by the fact that
- * we are serializing, not flushing. Thus, as long as all entries
- * are serialized correctly, it doesn't matter if we have to go back
- * and serialize an entry a second time.
- *
- * These possible actions result in the following modfications to
- * tha basic algorithm:
- *
- * 1) In the event of an entry expunge, eviction or removal, we must
- * restart the scan as it is possible that the next entry in our
- * scan is no longer in the cache. Were we to examine this entry,
- * we would be accessing deallocated memory.
- *
- * 2) A resize, dirty, or insertion of an entry may result in the
- * the increment of a flush dependency parent's dirty and/or
- * unserialized child count. In the context of serializing the
- * the cache, this is a non-issue, as even if we have already
- * serialized the parent, it will be marked dirty and its image
- * marked out of date if appropriate when the child is serialized.
- *
- * However, this is a major issue for a flush, as were this to happen
- * in a flush, it would violate the invariant that the flush dependency
- * feature is intended to enforce. As the metadata cache has no
- * control over the behavior of cache clients, it has no way of
- * preventing this behaviour. However, it should detect it if at all
- * possible.
- *
- * Do this by maintaining a count of the number of times each entry is
- * serialized during a cache serialization. If any flush dependency
- * parent is serialized more than once, throw an assertion failure.
- *
- * 3) An entry relocation will typically change the location of the
- * entry in the index list. This shouldn't cause problems as we
- * will scan the index list until we make a complete pass without
- * finding anything to serialize -- making relocations of either
- * the current or next entries irrelevant.
- *
- * Note that since a relocation may result in our skipping part of
- * the index list, we must always do at least one more pass through
- * the index list after an entry relocation.
- *
- * 4) Changes in the flush dependency structure are possible on
- * entry insertion, load, expunge, evict, or remove. Destruction
- * of a flush dependency has no effect, as it can only relax the
- * flush dependencies. Creation of a flush dependency can create
- * an unserialized child of a flush dependency parent where all
- * flush dependency children were previously serialized. Should
- * this child dirty the flush dependency parent when it is serialized,
- * the parent will be re-serialized.
- *
- * Per the discussion of 2) above, this is a non-issue for cache
- * serialization, and a major problem for cache flush. Using the
- * same detection mechanism, throw an assertion failure if this
- * condition appears.
- *
- * Observe that either eviction or removal of entries as a result of
- * a serialization is not a problem as long as the flush dependency
- * tree does not change beyond the removal of a leaf.
- */
- while(!done) {
- /* Reset the counters so that we can detect insertions, loads,
- * moves, and flush dependency height changes caused by the pre_serialize
- * and serialize callbacks.
- */
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
-
- done = TRUE; /* set to FALSE if any activity in inner loop */
- entry_ptr = cache_ptr->il_head;
- while(entry_ptr != NULL) {
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
-
- /* Verify that either the entry is already serialized, or
- * that it is assigned to either the target or an inner
- * ring.
- */
- HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
-
- /* Skip flush me last entries or inner ring entries */
- if(!entry_ptr->flush_me_last && entry_ptr->ring == ring) {
-
- /* If we encounter an unserialized entry in the current
- * ring that is not marked flush me last, we are not done.
- */
- if(!entry_ptr->image_up_to_date)
- done = FALSE;
-
- /* Serialize the entry if its image is not up to date
- * and it has no unserialized flush dependency children.
- */
- if(!entry_ptr->image_up_to_date && entry_ptr->flush_dep_nunser_children == 0) {
- HDassert(entry_ptr->serialization_count == 0);
-
- /* Serialize the entry */
- if(H5C__serialize_single_entry(f, dxpl_id, cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
-
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
- HDassert(entry_ptr->serialization_count == 0);
-
-#ifndef NDEBUG
- /* Increment serialization counter (to detect multiple serializations) */
- entry_ptr->serialization_count++;
-#endif /* NDEBUG */
- } /* end if */
- } /* end if */
-
- /* Check for the cache being perturbed during the entry serialize */
- if((cache_ptr->entries_loaded_counter > 0) ||
- (cache_ptr->entries_inserted_counter > 0) ||
- (cache_ptr->entries_relocated_counter > 0)) {
-
-#if H5C_COLLECT_CACHE_STATS
- H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr);
-#endif /* H5C_COLLECT_CACHE_STATS */
-
- /* Reset the counters */
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
-
- /* Restart scan */
- entry_ptr = cache_ptr->il_head;
- } /* end if */
- else
- /* Advance to next entry */
- entry_ptr = entry_ptr->il_next;
- } /* while ( entry_ptr != NULL ) */
- } /* while ( ! done ) */
-
-
- /* Reset the counters so that we can detect insertions, loads,
- * moves, and flush dependency height changes caused by the pre_serialize
- * and serialize callbacks.
- */
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
-
- /* At this point, all entries not marked "flush me last" and in
- * the current ring or outside it should be serialized and have up
- * to date images. Scan the index list again to serialize the
- * "flush me last" entries (if they are in the current ring) and to
- * verify that all other entries have up to date images.
- */
- entry_ptr = cache_ptr->il_head;
- while(entry_ptr != NULL) {
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->ring > H5C_RING_UNDEFINED);
- HDassert(entry_ptr->ring < H5C_RING_NTYPES);
- HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
-
- if(entry_ptr->ring == ring) {
- if(entry_ptr->flush_me_last) {
- if(!entry_ptr->image_up_to_date) {
- HDassert(entry_ptr->serialization_count == 0);
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
-
- /* Serialize the entry */
- if(H5C__serialize_single_entry(f, dxpl_id, cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
-
- /* Check for the cache changing */
- if((cache_ptr->entries_loaded_counter > 0) ||
- (cache_ptr->entries_inserted_counter > 0) ||
- (cache_ptr->entries_relocated_counter > 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flush_me_last entry serialization triggered restart")
-
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
- HDassert(entry_ptr->serialization_count == 0);
-#ifndef NDEBUG
- /* Increment serialization counter (to detect multiple serializations) */
- entry_ptr->serialization_count++;
-#endif /* NDEBUG */
- } /* end if */
- } /* end if */
- else {
- HDassert(entry_ptr->image_up_to_date);
- HDassert(entry_ptr->serialization_count <= 1);
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
- } /* end else */
- } /* if ( entry_ptr->ring == ring ) */
-
- entry_ptr = entry_ptr->il_next;
- } /* while ( entry_ptr != NULL ) */
-
-done:
- HDassert(cache_ptr->serialization_in_progress);
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__serialize_ring() */
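[Editorial note: the scan-restart pattern implemented above generalizes beyond the metadata cache. Below is a minimal, compilable sketch of the idea under stated assumptions: my_list_t, my_node_t, and process() are hypothetical stand-ins, not HDF5 API, and process() plays the role of a serialize callback that may perturb the list.]

    #include <stddef.h>

    typedef struct my_node_t {
        struct my_node_t *next;
        int               done;       /* analogous to image_up_to_date */
    } my_node_t;

    typedef struct {
        my_node_t *head;
        int        perturbed;         /* set when the list may have changed */
    } my_list_t;

    /* Stand-in for the serialize callback; a real callback could insert,
     * remove, or relocate nodes and set list->perturbed.
     */
    static void
    process(my_list_t *list, my_node_t *node)
    {
        (void)list;
        node->done = 1;
    }

    static void
    scan_with_restart(my_list_t *list)
    {
        int done = 0;

        while(!done) {
            my_node_t *node = list->head;

            done = 1;                 /* stays set only if nothing was left to do */
            list->perturbed = 0;

            while(node != NULL) {
                if(!node->done) {
                    done = 0;
                    process(list, node);
                }

                if(list->perturbed) {
                    /* 'node->next' may be stale or freed -- restart the scan */
                    list->perturbed = 0;
                    node = list->head;
                }
                else
                    node = node->next;
            }
        }
    }

The outer loop terminates only after a full pass finds no work, which is what makes relocations during the inner scan harmless, exactly as item 3) in the comment above argues.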
-
-
-/*-------------------------------------------------------------------------
- * Function: H5C__serialize_single_entry
- *
- * Purpose: Serialize the cache entry pointed to by the entry_ptr
- * parameter.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer, 7/24/15
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__serialize_single_entry(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr,
- H5C_cache_entry_t *entry_ptr)
-{
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_STATIC
-
- /* Sanity checks */
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(entry_ptr);
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(!entry_ptr->prefetched);
- HDassert(!entry_ptr->image_up_to_date);
- HDassert(entry_ptr->is_dirty);
- HDassert(!entry_ptr->is_protected);
- HDassert(!entry_ptr->flush_in_progress);
- HDassert(entry_ptr->type);
-
- /* Set entry_ptr->flush_in_progress to TRUE so that the target entry
- * will not be evicted out from under us. Must set it back to FALSE
- * when we are done.
- */
- entry_ptr->flush_in_progress = TRUE;
-
- /* Allocate buffer for the entry image if required. */
- if(NULL == entry_ptr->image_ptr) {
- HDassert(entry_ptr->size > 0);
- if(NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)) )
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
-#if H5C_DO_MEMORY_SANITY_CHECKS
- HDmemcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- } /* end if */
-
- /* Generate image for entry */
- if(H5C__generate_image(f, cache_ptr, entry_ptr, dxpl_id) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "Can't generate image for cache entry")
-
- /* Reset the flush_in_progress flag */
- entry_ptr->flush_in_progress = FALSE;
-
-done:
- HDassert((ret_value != SUCCEED) || (!entry_ptr->flush_in_progress));
- HDassert((ret_value != SUCCEED) || (entry_ptr->image_up_to_date));
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__serialize_single_entry() */
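[Editorial note: the H5C_DO_MEMORY_SANITY_CHECKS branch above uses a guard-byte scheme: the image buffer is over-allocated and a sentinel is written just past the usable region so a later check can detect overruns by serialize callbacks. A sketch with plain libc calls instead of the H5MM/HDmemcpy wrappers follows; EXTRA and SENTINEL are hypothetical local names, not the HDF5 constants.]

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    #define EXTRA 8
    static const unsigned char SENTINEL[EXTRA] =
        { 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef };

    /* Allocate 'size' usable bytes plus EXTRA guard bytes */
    static void *
    alloc_guarded(size_t size)
    {
        unsigned char *buf = (unsigned char *)malloc(size + EXTRA);

        if(buf != NULL)
            /* Any write that runs past 'size' bytes clobbers the sentinel */
            memcpy(buf + size, SENTINEL, EXTRA);

        return buf;
    }

    /* Trip an assertion if something wrote past the end of the buffer */
    static void
    check_guard(const void *buf, size_t size)
    {
        assert(0 == memcmp((const unsigned char *)buf + size, SENTINEL, EXTRA));
    }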
-
-
-/*-------------------------------------------------------------------------
* Function: H5C__write_cache_image_superblock_msg
*
* Purpose: Write the cache image superblock extension message,
diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c
index 8f8b5c6..ebb98b3 100644
--- a/src/H5Cmpio.c
+++ b/src/H5Cmpio.c
@@ -206,8 +206,8 @@ H5C_apply_candidate_list(H5F_t * f,
#if H5C_APPLY_CANDIDATE_LIST__DEBUG
char tbl_buf[1024];
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
- unsigned u;
- herr_t ret_value = SUCCEED; /* Return value */
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -345,10 +345,11 @@ H5C_apply_candidate_list(H5F_t * f,
} /* end else */
/* Entries marked as collectively accessed and are in the
- candidate list to clear from the cache have to be
- removed from the coll list. This is OK since the
- candidate list is collective and uniform across all
- ranks. */
+ * candidate list to clear from the cache have to be
+ * removed from the coll list. This is OK since the
+ * candidate list is collective and uniform across all
+ * ranks.
+ */
if(entry_ptr->coll_access) {
entry_ptr->coll_access = FALSE;
H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
@@ -806,7 +807,7 @@ H5C_construct_candidate_list__clean_cache(H5C_t * cache_ptr)
if(space_needed > 0) { /* we have work to do */
H5C_cache_entry_t *entry_ptr;
- int nominated_entries_count = 0;
+ unsigned nominated_entries_count = 0;
size_t nominated_entries_size = 0;
haddr_t nominated_addr;
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index 26e932f..5b923e9 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -1007,7 +1007,7 @@ if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "Pre HT insert SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "pre HT insert SC failed") \
}
#define H5C__POST_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
@@ -1029,7 +1029,7 @@ if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
( (cache_ptr)->index_size != (cache_ptr)->il_size) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "Post HT insert SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post HT insert SC failed") \
}
#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \
@@ -1070,7 +1070,7 @@ if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Pre HT remove SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT remove SC failed") \
}
#define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr) \
@@ -1096,7 +1096,7 @@ if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Post HT remove SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT remove SC failed") \
}
/* (Keep in sync w/H5C_TEST__PRE_HT_SEARCH_SC macro in test/cache_common.h -QAK) */
@@ -1108,7 +1108,7 @@ if ( ( (cache_ptr) == NULL ) || \
( ! H5F_addr_defined(Addr) ) || \
( H5C__HASH_FCN(Addr) < 0 ) || \
( H5C__HASH_FCN(Addr) >= H5C__HASH_TABLE_LEN ) ) { \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "Pre HT search SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "pre HT search SC failed") \
}
/* (Keep in sync w/H5C_TEST__POST_SUC_HT_SEARCH_SC macro in test/cache_common.h -QAK) */
@@ -1130,7 +1130,7 @@ if ( ( (cache_ptr) == NULL ) || \
( (entry_ptr)->ht_prev->ht_next != (entry_ptr) ) ) || \
( ( (entry_ptr)->ht_next != NULL ) && \
( (entry_ptr)->ht_next->ht_prev != (entry_ptr) ) ) ) { \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "Post successful HT search SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post successful HT search SC failed") \
}
/* (Keep in sync w/H5C_TEST__POST_HT_SHIFT_TO_FRONT macro in test/cache_common.h -QAK) */
@@ -1138,7 +1138,7 @@ if ( ( (cache_ptr) == NULL ) || \
if ( ( (cache_ptr) == NULL ) || \
( ((cache_ptr)->index)[k] != (entry_ptr) ) || \
( (entry_ptr)->ht_prev != NULL ) ) { \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "Post HT shift to front SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post HT shift to front SC failed") \
}
#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
@@ -1173,7 +1173,7 @@ if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Pre HT entry size change SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT entry size change SC failed") \
}
#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
@@ -1203,7 +1203,7 @@ if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Post HT entry size change SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT entry size change SC failed") \
}
#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \
@@ -1230,7 +1230,7 @@ if ( \
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
(cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Pre HT update for entry clean SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT update for entry clean SC failed") \
}
#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \
@@ -1257,7 +1257,7 @@ if ( \
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
(cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Pre HT update for entry dirty SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT update for entry dirty SC failed") \
}
#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \
@@ -1273,7 +1273,7 @@ if ( ( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
(cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Post HT update for entry clean SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT update for entry clean SC failed") \
}
#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \
@@ -1289,7 +1289,7 @@ if ( ( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
(cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Post HT update for entry dirty SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT update for entry dirty SC failed") \
}
#else /* H5C_DO_SANITY_CHECKS */
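[Editorial note: each of the sanity-check macros in this hunk follows the same shape: test a bundle of invariants, abort via HDassert in debug builds, and route release builds through the normal error-cleanup path. A compilable sketch of that shape, using hypothetical names (my_list_t, CHECK_COUNTS) rather than the cache's real fields, and a do/while(0) wrapper where the real macros rely on their call sites:]

    #include <assert.h>
    #include <stddef.h>

    typedef struct {
        size_t len;                   /* number of entries */
        size_t size;                  /* total bytes held */
    } my_list_t;

    /* Invariant check in the style of the H5C__PRE/POST_* macros: loud in
     * debug builds, soft error-return in release builds. Requires a local
     * 'ret_value' and a 'done' label at the call site.
     */
    #define CHECK_COUNTS(list_ptr, fail_val)                              \
    do {                                                                  \
        if(((list_ptr) == NULL) ||                                        \
                (((list_ptr)->len == 0) && ((list_ptr)->size != 0))) {    \
            assert(!"count sanity check failed");                         \
            ret_value = (fail_val);                                       \
            goto done;                                                    \
        }                                                                 \
    } while(0)

    static int
    insert_entry(my_list_t *list, size_t entry_size)
    {
        int ret_value = 0;

        CHECK_COUNTS(list, -1);       /* pre-insert sanity check */
        list->len  += 1;
        list->size += entry_size;
        CHECK_COUNTS(list, -1);       /* post-insert sanity check */

    done:
        return ret_value;
    }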
@@ -4675,8 +4675,8 @@ struct H5C_t {
H5SL_t * slist_ptr;
uint32_t num_last_entries;
#if H5C_DO_SANITY_CHECKS
- int64_t slist_len_increase;
- int64_t slist_size_increase;
+ int32_t slist_len_increase;
+ ssize_t slist_size_increase;
#endif /* H5C_DO_SANITY_CHECKS */
/* Fields for maintaining list of tagged entries */
@@ -4872,7 +4872,7 @@ typedef int (*H5C_tag_iter_cb_t)(H5C_cache_entry_t *entry, void *ctx);
/* Package Private Prototypes */
/******************************/
H5_DLL herr_t H5C__prep_image_for_file_close(H5F_t *f, hid_t dxpl_id);
-H5_DLL herr_t H5C__deserialize_prefetched_entry(H5F_t * f, hid_t dxpl_id,
+H5_DLL herr_t H5C__deserialize_prefetched_entry(H5F_t *f, hid_t dxpl_id,
H5C_t * cache_ptr, H5C_cache_entry_t** entry_ptr_ptr,
const H5C_class_t * type, haddr_t addr, void * udata);
@@ -4888,6 +4888,7 @@ H5_DLL herr_t H5C__make_space_in_cache(H5F_t * f, hid_t dxpl_id,
H5_DLL herr_t H5C__flush_marked_entries(H5F_t * f, hid_t dxpl_id);
H5_DLL herr_t H5C__generate_image(H5F_t *f, H5C_t *cache_ptr,
H5C_cache_entry_t *entry_ptr, hid_t dxpl_id);
+H5_DLL herr_t H5C__serialize_cache(H5F_t *f, hid_t dxpl_id);
H5_DLL herr_t H5C__iter_tagged_entries(H5C_t *cache, haddr_t tag, hbool_t match_global,
H5C_tag_iter_cb_t cb, void *cb_ctx);
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index 30a7745..28eacf2 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -1661,8 +1661,8 @@ typedef struct H5C_cache_entry_t {
* JRM - 8/5/15
*
* magic: Unsigned 32 bit integer that must always be set to
- * H5C__IMAGE_ENTRY_T_MAGIC when the entry is valid.
- * The field must be set to H5C__IMAGE__ENTRY_T_BAD_MAGIC
+ * H5C_IMAGE_ENTRY_T_MAGIC when the entry is valid.
+ * The field must be set to H5C_IMAGE_ENTRY_T_BAD_MAGIC
* just before the entry is freed.
*
* addr: Base address of the cache entry on disk.
@@ -2291,6 +2291,7 @@ H5_DLL herr_t H5C_mark_entries_as_clean(H5F_t *f, hid_t dxpl_id, unsigned ce_arr
#ifndef NDEBUG /* debugging functions */
H5_DLL hbool_t H5C_get_serialization_in_progress(const H5C_t *cache_ptr);
H5_DLL hbool_t H5C_cache_is_clean(const H5C_t *cache_ptr, H5C_ring_t inner_ring);
+H5_DLL herr_t H5C_dump_cache_skip_list(H5C_t *cache_ptr, char *calling_fcn);
H5_DLL herr_t H5C_get_entry_ptr_from_addr(H5C_t *cache_ptr, haddr_t addr,
void **entry_ptr_ptr);
H5_DLL herr_t H5C_flush_dependency_exists(H5C_t *cache_ptr, haddr_t parent_addr,
diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c
index f4ce969..3b86dae 100644
--- a/src/H5Fsuper.c
+++ b/src/H5Fsuper.c
@@ -861,6 +861,7 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
H5O_loc_t ext_loc; /* Superblock extension object location */
hbool_t need_ext; /* Whether the superblock extension is needed */
hbool_t ext_created = FALSE; /* Whether the extension has been created */
+ hbool_t non_default_fs_settings = FALSE; /* Whether the file has non-default free-space settings */
herr_t ret_value = SUCCEED; /* Return Value */
FUNC_ENTER_PACKAGE_TAG(dxpl_id, H5AC__SUPERBLOCK_TAG, FAIL)
@@ -887,6 +888,11 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
if(H5P_get(plist, H5F_CRT_BTREE_RANK_NAME, &sblock->btree_k[0]) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to get rank for btree internal nodes")
+ /* Check for non-default free-space settings */
+ if(!(f->shared->fs_strategy == H5F_FILE_SPACE_STRATEGY_DEF &&
+ f->shared->fs_threshold == H5F_FREE_SPACE_THRESHOLD_DEF))
+ non_default_fs_settings = TRUE;
+
/* Bump superblock version if latest superblock version support is enabled */
if(H5F_USE_LATEST_FLAGS(f, H5F_LATEST_SUPERBLOCK))
super_vers = HDF5_SUPERBLOCK_VERSION_LATEST;
@@ -896,8 +902,7 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
/* Bump superblock version to create superblock extension for
* non-default file space strategy or non-default free-space threshold
*/
- else if(f->shared->fs_strategy != H5F_FILE_SPACE_STRATEGY_DEF ||
- f->shared->fs_threshold != H5F_FREE_SPACE_THRESHOLD_DEF)
+ else if(non_default_fs_settings)
super_vers = HDF5_SUPERBLOCK_VERSION_2;
/* Check for non-default indexed storage B-tree internal 'K' value
* and set the version # of the superblock to 1 if it is a non-default
@@ -1014,8 +1019,7 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
need_ext = TRUE;
} /* end if */
/* Files with non-default free space settings always need the superblock extension */
- else if(f->shared->fs_strategy != H5F_FILE_SPACE_STRATEGY_DEF ||
- f->shared->fs_threshold != H5F_FREE_SPACE_THRESHOLD_DEF) {
+ else if(non_default_fs_settings) {
HDassert(super_vers >= HDF5_SUPERBLOCK_VERSION_2);
need_ext = TRUE;
} /* end if */
@@ -1103,9 +1107,8 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
f->shared->drvinfo_sb_msg_exists = TRUE;
} /* end if */
- /* Check for non-default free space settings */
- if(f->shared->fs_strategy != H5F_FILE_SPACE_STRATEGY_DEF ||
- f->shared->fs_threshold != H5F_FREE_SPACE_THRESHOLD_DEF) {
+ /* Check for non-default free-space info settings */
+ if(non_default_fs_settings) {
H5FD_mem_t type; /* Memory type for iteration */
H5O_fsinfo_t fsinfo; /* Free space manager info message */
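[Editorial note: the H5Fsuper.c change above is a compute-once refactor: the two-clause free-space test is evaluated a single time into non_default_fs_settings and the flag is reused at each decision point, so the clauses cannot drift apart across the three call sites. A stripped-down sketch with hypothetical names and default values:]

    #include <stdbool.h>

    #define STRATEGY_DEF  0           /* hypothetical default strategy */
    #define THRESHOLD_DEF 1u          /* hypothetical default threshold */

    typedef struct {
        int      fs_strategy;
        unsigned fs_threshold;
    } fs_settings_t;

    static int
    pick_version(const fs_settings_t *s)
    {
        /* Evaluate the predicate once, mirroring non_default_fs_settings */
        bool non_default = !(s->fs_strategy == STRATEGY_DEF &&
                             s->fs_threshold == THRESHOLD_DEF);
        int version = 0;

        if(non_default)               /* version bump, as in H5F__super_init */
            version = 2;

        return version;
    }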
diff --git a/src/H5HFcache.c b/src/H5HFcache.c
index 8e5ed76..ffdac9a 100644
--- a/src/H5HFcache.c
+++ b/src/H5HFcache.c
@@ -117,10 +117,10 @@ static herr_t H5HF__cache_dblock_free_icr(void *thing);
/* Debugging Function Prototypes */
#ifndef NDEBUG
static herr_t H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, hid_t dxpl_id,
- H5HF_hdr_t * hdr, hbool_t *fd_clean, hbool_t *clean);
+ H5HF_hdr_t *hdr, hbool_t *fd_clean, hbool_t *clean);
static herr_t H5HF__cache_verify_iblock_descendants_clean(H5F_t *f,
hid_t dxpl_id, haddr_t fd_parent_addr, H5HF_indirect_t *iblock,
- unsigned *iblock_status, hbool_t * fd_clean, hbool_t *clean);
+ unsigned *iblock_status, hbool_t *fd_clean, hbool_t *clean);
static herr_t H5HF__cache_verify_iblocks_dblocks_clean(H5F_t *f,
haddr_t fd_parent_addr, H5HF_indirect_t *iblock, hbool_t *fd_clean,
hbool_t *clean, hbool_t *has_dblocks);
diff --git a/test/cache.c b/test/cache.c
index 1cfc0b1..87b1272 100644
--- a/test/cache.c
+++ b/test/cache.c
@@ -33693,14 +33693,10 @@ check_metadata_cork(hbool_t fill_via_insertion)
reset_entries();
- if(fill_via_insertion) {
-
- TESTING("to ensure cork/uncork metadata when inserting");
-
- } else {
-
- TESTING("to ensure cork/uncork metadata on protect/unprotect");
- }
+ if(fill_via_insertion)
+ TESTING("to ensure cork/uncork metadata when inserting")
+ else
+ TESTING("to ensure cork/uncork metadata on protect/unprotect")
if(show_progress) /* 0 */
HDfprintf(stdout, "\n%s: check point %d -- pass %d\n",
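[Editorial note: the semicolon-less TESTING(...) usage added above only compiles if the harness's TESTING macro expands to a compound statement ({ ... }); a do/while(0) body would require a trailing semicolon and break before the else. A sketch contrasting the two macro shapes, with hypothetical PRINT_BLOCK/PRINT_DOWHILE names -- the real TESTING may differ:]

    #include <stdio.h>

    #define PRINT_BLOCK(s)   { printf("%s\n", (s)); fflush(stdout); }
    #define PRINT_DOWHILE(s) do { printf("%s\n", (s)); fflush(stdout); } while(0)

    static void
    demo(int flag)
    {
        if(flag)
            PRINT_BLOCK("a")          /* OK: expands to a compound statement */
        else
            PRINT_BLOCK("b")

        if(flag)
            PRINT_DOWHILE("c");       /* the ';' is required here: without it  */
        else                          /* the expansion ends in 'while(0) else',*/
            PRINT_DOWHILE("d");       /* which is a syntax error               */
    }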
diff --git a/test/cache_common.c b/test/cache_common.c
index 81e62ae..d1bbf10 100644
--- a/test/cache_common.c
+++ b/test/cache_common.c
@@ -3235,8 +3235,8 @@ setup_cache(size_t max_cache_size,
if(verbose)
HDfprintf(stdout, "%s: H5Fcreate() failed.\n", FUNC);
- }
- }
+ } /* end if */
+ } /* end if */
if(show_progress) /* 4 */
HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
diff --git a/test/cache_image.c b/test/cache_image.c
index 946d46b..de0507b 100644
--- a/test/cache_image.c
+++ b/test/cache_image.c
@@ -6292,7 +6292,7 @@ cache_image_api_error_check_3(void)
if ( H5Fstart_swmr_write(file_id) == SUCCEED ) {
pass = FALSE;
- failure_mssg = "metadata cache image block loaded(1).";
+ failure_mssg = "SWMR start succeeded in file with cache image.";
}
} H5E_END_TRY;
}
diff --git a/test/dsets.c b/test/dsets.c
index d086c58..39b7c22 100644
--- a/test/dsets.c
+++ b/test/dsets.c
@@ -507,7 +507,7 @@ test_simple_io(const char *env_h5_drvr, hid_t fapl)
TESTING("simple I/O");
- /* Can't run this test with multi-file VFDs */
+ /* Can't run this test with multi-file VFDs because it accesses the file directly with HDopen/read/seek */
if(HDstrcmp(env_h5_drvr, "split") && HDstrcmp(env_h5_drvr, "multi") && HDstrcmp(env_h5_drvr, "family")) {
h5_fixname(FILENAME[4], fapl, filename, sizeof filename);
@@ -645,7 +645,7 @@ test_userblock_offset(const char *env_h5_drvr, hid_t fapl)
TESTING("dataset offset with user block");
- /* Can't run this test with multi-file VFDs */
+ /* Can't run this test with multi-file VFDs because it accesses the file directly with HDopen/read/seek */
if(HDstrcmp(env_h5_drvr, "split") && HDstrcmp(env_h5_drvr, "multi") && HDstrcmp(env_h5_drvr, "family")) {
h5_fixname(FILENAME[2], fapl, filename, sizeof filename);
@@ -1984,10 +1984,11 @@ test_filter_internal(hid_t fid, const char *name, hid_t dcpl, int if_fletcher32,
}
}
- PASSED();
-
/* Get the storage size of the dataset */
if((*dset_size=H5Dget_storage_size(dataset))==0) goto error;
+
+ PASSED();
+
/* Clean up objects used for this test */
if(H5Dclose (dataset) < 0) goto error;
if(H5Sclose (sid) < 0) goto error;
@@ -2932,6 +2933,7 @@ test_nbit_int(hid_t file)
PASSED();
return 0;
+
error:
return -1;
}
diff --git a/test/fheap.c b/test/fheap.c
index 8e364de..82859d2 100644
--- a/test/fheap.c
+++ b/test/fheap.c
@@ -16360,6 +16360,7 @@ main(void)
/* Reset library */
h5_reset();
+
fapl = h5_fileaccess();
ExpressMode = GetTestExpress();
if(ExpressMode > 1)
@@ -16379,13 +16380,7 @@ main(void)
shared_wobj_g[u] = (unsigned char)u;
/* Iterate over the testing parameters */
-#ifndef QAK
for(curr_test = FHEAP_TEST_NORMAL; curr_test < FHEAP_TEST_NTESTS; H5_INC_ENUM(fheap_test_type_t, curr_test)) {
-#else /* QAK */
-HDfprintf(stderr, "Uncomment test loop!\n");
-curr_test = FHEAP_TEST_NORMAL;
-/* curr_test = FHEAP_TEST_REOPEN; */
-#endif /* QAK */
/* Clear the testing parameters */
HDmemset(&tparam, 0, sizeof(fheap_test_param_t));
tparam.actual_id_len = HEAP_ID_LEN;
@@ -16410,7 +16405,6 @@ curr_test = FHEAP_TEST_NORMAL;
} /* end switch */
/* Test fractal heap creation */
-#ifndef QAK
nerrors += test_create(fapl, &small_cparam, &tparam);
nerrors += test_reopen(fapl, &small_cparam, &tparam);
nerrors += test_open_twice(fapl, &small_cparam, &tparam);
@@ -16419,23 +16413,12 @@ curr_test = FHEAP_TEST_NORMAL;
nerrors += test_filtered_create(fapl, &small_cparam);
nerrors += test_size(fapl, &small_cparam);
nerrors += test_reopen_hdr(fapl, &small_cparam);
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
-#ifndef QAK2
-#ifndef QAK
{
fheap_test_fill_t fill; /* Size of objects to fill heap blocks with */
-#ifndef QAK2
/* Filling with different sized objects */
for(fill = FHEAP_TEST_FILL_LARGE; fill < FHEAP_TEST_FILL_N; H5_INC_ENUM(fheap_test_fill_t, fill)) {
-#else /* QAK2 */
-HDfprintf(stderr, "Uncomment test loop!\n");
-fill = FHEAP_TEST_FILL_LARGE;
-/* fill = FHEAP_TEST_FILL_SINGLE; */
-#endif /* QAK2 */
tparam.fill = fill;
/* Set appropriate testing parameters for each test */
@@ -16460,12 +16443,8 @@ fill = FHEAP_TEST_FILL_LARGE;
* Test fractal heap managed object insertion
*/
-#ifndef QAK
/* "Weird" sized objects */
nerrors += test_man_insert_weird(fapl, &small_cparam, &tparam);
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
#ifdef ALL_INSERT_TESTS
/* "Standard" sized objects, building from simple to complex heaps */
@@ -16501,20 +16480,15 @@ HDfprintf(stderr, "Uncomment tests!\n");
/* If this test fails, uncomment the tests above, which build up to this
* level of complexity gradually. -QAK
*/
-#ifndef QAK
if(ExpressMode > 1)
printf("***Express test mode on. test_man_start_5th_recursive_indirect is skipped\n");
else
nerrors += test_man_start_5th_recursive_indirect(fapl, &small_cparam, &tparam);
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
/*
* Test fractal heap object deletion
*/
/* Simple removal */
-#ifndef QAK
nerrors += test_man_remove_bogus(fapl, &small_cparam, &tparam);
nerrors += test_man_remove_one(fapl, &small_cparam, &tparam);
nerrors += test_man_remove_two(fapl, &small_cparam, &tparam);
@@ -16531,12 +16505,7 @@ HDfprintf(stderr, "Uncomment tests!\n");
/* Incremental insert & removal */
tparam.del_dir = FHEAP_DEL_FORWARD;
nerrors += test_man_incr_insert_remove(fapl, &small_cparam, &tparam);
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
-#ifndef QAK
-#ifndef QAK2
{
fheap_test_del_dir_t del_dir; /* Deletion direction */
fheap_test_del_drain_t drain_half; /* Deletion draining */
@@ -16546,19 +16515,10 @@ HDfprintf(stderr, "Uncomment tests!\n");
tparam.del_dir = del_dir;
for(drain_half = FHEAP_DEL_DRAIN_ALL; drain_half < FHEAP_DEL_DRAIN_N; H5_INC_ENUM(fheap_test_del_drain_t, drain_half)) {
tparam.drain_half = drain_half;
-#else /* QAK2 */
-HDfprintf(stderr, "Uncomment test loops!\n");
-/* tparam.del_dir = FHEAP_DEL_FORWARD; */
-/* tparam.del_dir = FHEAP_DEL_REVERSE; */
-tparam.del_dir = FHEAP_DEL_HEAP;
-tparam.drain_half = FHEAP_DEL_DRAIN_ALL;
-/* tparam.drain_half = FHEAP_DEL_DRAIN_HALF; */
-#endif /* QAK2 */
/* Don't need to test deletion directions when deleting entire heap */
if(tparam.del_dir == FHEAP_DEL_HEAP && tparam.drain_half > FHEAP_DEL_DRAIN_ALL)
break;
-#ifndef QAK
/* Simple insertion patterns */
nerrors += test_man_remove_root_direct(fapl, &small_cparam, &tparam);
nerrors += test_man_remove_two_direct(fapl, &small_cparam, &tparam);
@@ -16572,11 +16532,7 @@ tparam.drain_half = FHEAP_DEL_DRAIN_ALL;
nerrors += test_man_remove_2nd_indirect(fapl, &small_cparam, &tparam);
nerrors += test_man_remove_3rd_indirect(fapl, &small_cparam, &tparam);
} /* end else */
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
-#ifndef QAK
/* Skip blocks insertion */
/* (covers insertion & deletion of skipped blocks) */
nerrors += test_man_skip_start_block(fapl, &small_cparam, &tparam);
@@ -16608,21 +16564,13 @@ HDfprintf(stderr, "Uncomment tests!\n");
nerrors += test_man_fill_3rd_direct_fill_2nd_direct_fill_direct_skip_3rd_indirect_wrap_start_block_add_skipped(fapl, &small_cparam, &tparam);
nerrors += test_man_fill_4th_direct_less_one_fill_2nd_direct_fill_direct_skip_3rd_indirect_wrap_start_block_add_skipped(fapl, &small_cparam, &tparam);
} /* end else */
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
-#ifndef QAK
/* Fragmented insertion patterns */
/* (covers insertion & deletion of fragmented blocks) */
nerrors += test_man_frag_simple(fapl, &small_cparam, &tparam);
nerrors += test_man_frag_direct(fapl, &small_cparam, &tparam);
nerrors += test_man_frag_2nd_direct(fapl, &small_cparam, &tparam);
nerrors += test_man_frag_3rd_direct(fapl, &small_cparam, &tparam);
-#else /* QAK */
- HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
-#ifndef QAK2
} /* end for */
} /* end for */
@@ -16630,22 +16578,12 @@ HDfprintf(stderr, "Uncomment tests!\n");
tparam.drain_half = FHEAP_DEL_DRAIN_ALL;
} /* end block */
-#endif /* QAK2 */
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
-#ifndef QAK2
} /* end for */
-#endif /* QAK2 */
} /* end block */
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
/*
* Test fractal heap 'huge' & 'tiny' object insertion & deletion
*/
-#ifndef QAK
{
fheap_test_del_dir_t del_dir; /* Deletion direction */
unsigned id_len; /* Length of heap IDs */
@@ -16687,24 +16625,16 @@ HDfprintf(stderr, "Uncomment tests!\n");
tparam.del_dir = del_dir;
/* Test 'huge' object insert & delete */
-#ifndef QAK
nerrors += test_huge_insert_one(fapl, &small_cparam, &tparam);
nerrors += test_huge_insert_two(fapl, &small_cparam, &tparam);
nerrors += test_huge_insert_three(fapl, &small_cparam, &tparam);
nerrors += test_huge_insert_mix(fapl, &small_cparam, &tparam);
nerrors += test_filtered_huge(fapl, &small_cparam, &tparam);
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
-#ifndef QAK
/* Test 'tiny' object insert & delete */
nerrors += test_tiny_insert_one(fapl, &small_cparam, &tparam);
nerrors += test_tiny_insert_two(fapl, &small_cparam, &tparam);
nerrors += test_tiny_insert_mix(fapl, &small_cparam, &tparam);
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
} /* end for */
} /* end for */
@@ -16712,16 +16642,9 @@ HDfprintf(stderr, "Uncomment tests!\n");
small_cparam.id_len = 0;
tparam.actual_id_len = HEAP_ID_LEN;
} /* end block */
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
-#else /* QAK2 */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK2 */
/* Test I/O filter support */
-#ifndef QAK
/* Try several different methods of deleting objects */
{
fheap_test_del_dir_t del_dir; /* Deletion direction */
@@ -16743,16 +16666,11 @@ HDfprintf(stderr, "Uncomment tests!\n");
tparam.comp = FHEAP_TEST_NO_COMPRESS;
} /* end for */
} /* end block */
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
-#ifndef QAK
/* Random object insertion & deletion */
if(ExpressMode > 1)
printf("***Express test mode on. Some tests skipped\n");
else {
-#ifndef QAK
/* Random tests using "small" heap creation parameters */
puts("Using 'small' heap creation parameters");
@@ -16765,11 +16683,7 @@ HDfprintf(stderr, "Uncomment tests!\n");
tparam.del_dir = FHEAP_DEL_HEAP;
nerrors += test_random((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(50*1000*1000)), fapl, &small_cparam, &tparam);
nerrors += test_random_pow2((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(4*1000*1000)), fapl, &small_cparam, &tparam);
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
-#ifndef QAK
/* Random tests using "large" heap creation parameters */
puts("Using 'large' heap creation parameters");
tparam.actual_id_len = LARGE_HEAP_ID_LEN;
@@ -16783,18 +16697,11 @@ HDfprintf(stderr, "Uncomment tests!\n");
tparam.del_dir = FHEAP_DEL_HEAP;
nerrors += test_random((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(50*1000*1000)), fapl, &large_cparam, &tparam);
nerrors += test_random_pow2((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(4*1000*1000)), fapl, &large_cparam, &tparam);
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
/* Reset the "normal" heap ID length */
tparam.actual_id_len = SMALL_HEAP_ID_LEN;
} /* end else */
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
-#ifndef QAK
/* Test object writing support */
/* Basic object writing */
@@ -16806,19 +16713,10 @@ HDfprintf(stderr, "Uncomment tests!\n");
/* Reset block compression */
tparam.comp = FHEAP_TEST_NO_COMPRESS;
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
-#ifndef QAK
} /* end for */
-#endif /* QAK */
/* Tests that address specific bugs */
-#ifndef QAK
nerrors += test_bug1(fapl, &small_cparam, &tparam);
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
/* Verify symbol table messages are cached */
nerrors += (h5_verify_cached_stabs(FILENAME, fapl) < 0 ? 1 : 0);
diff --git a/test/fillval.c b/test/fillval.c
index 4f7adc1..6eb3565 100644
--- a/test/fillval.c
+++ b/test/fillval.c
@@ -2370,7 +2370,7 @@ main(int argc, char *argv[])
{
int nerrors=0, argno, test_contig=1, test_chunk=1, test_compact=1;
hid_t fapl = (-1), fapl2 = (-1); /* File access property lists */
- unsigned new_format; /* Whether to use the new format or not */
+ unsigned new_format; /* Whether to use the new format or not */
if(argc >= 2) {
test_contig = test_chunk = test_compact = 0;
diff --git a/test/freespace.c b/test/freespace.c
index d963a6e..181e6a1 100644
--- a/test/freespace.c
+++ b/test/freespace.c
@@ -1995,7 +1995,8 @@ test_fs_sect_shrink(hid_t fapl)
(hsize_t)(TEST_SECT_SIZE50), (H5FS_section_info_t **)&node)) < 0)
FAIL_STACK_ERROR
- if (node_found) TEST_ERROR
+ if (node_found)
+ TEST_ERROR
if(check_stats(f, frsp, &state))
TEST_ERROR
@@ -2095,7 +2096,8 @@ test_fs_sect_shrink(hid_t fapl)
(hsize_t)(TEST_SECT_SIZE50), (H5FS_section_info_t **)&node)) < 0)
FAIL_STACK_ERROR
- if (node_found) TEST_ERROR
+ if (node_found)
+ TEST_ERROR
/* section A should not be there in free-space */
if((node_found = H5FS_sect_find(f, dxpl_id, frsp,
@@ -2238,8 +2240,8 @@ test_fs_sect_change_class(hid_t fapl)
TEST_ERROR
if (H5FS_sect_change_class(f, dxpl_id, frsp, (H5FS_section_info_t *)sect_node1,
- TEST_FSPACE_SECT_TYPE_NONE) < 0)
- TEST_ERROR
+ TEST_FSPACE_SECT_TYPE_NONE) < 0)
+ TEST_ERROR
state.serial_sect_count += 1;
state.ghost_sect_count -=1;
diff --git a/test/links.c b/test/links.c
index 3aff68b..3364c7e 100644
--- a/test/links.c
+++ b/test/links.c
@@ -22,7 +22,7 @@
/*
* This file needs to access private information from the H5FD package.
- * This file also needs to access the group testing code.
+ * This file also needs to access the file driver testing code.
*/
#define H5FD_FRIEND /*suppress error about including H5FDpkg */
#define H5FD_TESTING
diff --git a/test/objcopy.c b/test/objcopy.c
index 4166284..0711fb0 100644
--- a/test/objcopy.c
+++ b/test/objcopy.c
@@ -7200,7 +7200,7 @@ test_copy_ext_link(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t dst_fap
if(H5Gclose(gid) < 0) TEST_ERROR
/* create file to hold external links to the src file */
- if((fid_ext = H5Fcreate(ext_filename, H5F_ACC_TRUNC, H5P_DEFAULT, src_fapl)) < 0) TEST_ERROR
+ if((fid_ext = H5Fcreate(ext_filename, H5F_ACC_TRUNC, fcpl_src, src_fapl)) < 0) TEST_ERROR
/* create group in the file that will hold the external link */
if((gid = H5Gcreate2(fid_ext, NAME_GROUP_LINK, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR
diff --git a/test/testflushrefresh.sh.in b/test/testflushrefresh.sh.in
index 7bfeb60..83b7134 100644
--- a/test/testflushrefresh.sh.in
+++ b/test/testflushrefresh.sh.in
@@ -118,7 +118,7 @@ until [ $verification_done -eq 1 ]; do
# Check to see if we timed out looking for the signal before continuing.
if [ $timedout -gt 0 ]; then
- echo timed out waiting for signal from test program.
+ echo "timed out waiting for signal from test program (flush)."
break
fi
@@ -158,7 +158,7 @@ if [ $timedout -eq 0 ]; then
# Check to see if we timed out looking for the signal before continuing.
if [ $timedout -gt 0 ]; then
- echo timed out waiting for signal from test program.
+ echo "timed out waiting for signal from test program (refresh)."
break
fi
diff --git a/testpar/t_cache.c b/testpar/t_cache.c
index 0e69557..bfa4c8f 100644
--- a/testpar/t_cache.c
+++ b/testpar/t_cache.c
@@ -4334,8 +4334,7 @@ setup_cache_for_test(hid_t * fid_ptr,
*
*****************************************************************************/
static void
-verify_writes(unsigned num_writes,
- haddr_t * written_entries_tbl)
+verify_writes(unsigned num_writes, haddr_t *written_entries_tbl)
{
const hbool_t report = FALSE;
hbool_t proceed = TRUE;
@@ -4361,13 +4360,10 @@ verify_writes(unsigned num_writes,
}
}
- if ( proceed ) {
-
+ if(proceed)
proceed = verify_total_writes(num_writes);
- }
- while ( ( proceed ) && ( u < num_writes ) )
- {
+ while(proceed && u < num_writes) {
proceed = verify_entry_writes(written_entries_tbl[u], 1);
u++;
}
diff --git a/tools/test/h5format_convert/h5fc_gentest.c b/tools/test/h5format_convert/h5fc_gentest.c
index fea7eed..9ef8e6e 100644
--- a/tools/test/h5format_convert/h5fc_gentest.c
+++ b/tools/test/h5format_convert/h5fc_gentest.c
@@ -44,8 +44,6 @@ const char *FILENAME[] = {
#define GROUP "GROUP"
-#define DSET_BT1 "DSET_BT1"
-#define DSET_NDATA_BT1 "DSET_NDATA_BT1"
#define DSET_COMPACT "DSET_COMPACT"
#define DSET_CONTIGUOUS "DSET_CONTIGUOUS"
@@ -770,6 +768,7 @@ error:
H5Dclose(did2);
H5Gclose(gid);
H5Fclose(fid);
+ H5Pclose(fapl);
H5Pclose(fcpl);
} H5E_END_TRY;
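[Editorial note: the added H5Pclose(fapl) completes the usual HDF5 error-path idiom: every identifier the function may have opened is closed inside an H5E_BEGIN_TRY block, so closing an ID that was never opened (still -1) fails silently instead of pushing errors. A minimal sketch of the idiom under stated assumptions -- create_file is a hypothetical function, not part of the generator:]

    #include "hdf5.h"

    static int
    create_file(const char *name)
    {
        hid_t fapl = -1, fcpl = -1, fid = -1;

        if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0) goto error;
        if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0) goto error;
        if((fid = H5Fcreate(name, H5F_ACC_TRUNC, fcpl, fapl)) < 0) goto error;

        if(H5Pclose(fapl) < 0) goto error;
        if(H5Pclose(fcpl) < 0) goto error;
        if(H5Fclose(fid) < 0) goto error;

        return 0;

    error:
        /* Close *every* ID that might be open; failures are suppressed */
        H5E_BEGIN_TRY {
            H5Fclose(fid);
            H5Pclose(fapl);
            H5Pclose(fcpl);
        } H5E_END_TRY;

        return -1;
    }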