author     Quincey Koziol <koziol@lbl.gov>    2017-03-13 19:04:33 (GMT)
committer  Quincey Koziol <koziol@lbl.gov>    2017-03-13 19:04:33 (GMT)
commit     847d675f2743ec420ef6c9efdd6e52ae93b4fe44 (patch)
tree       3e74669322711c4ea14f8fd80ecf14da984c6212
parent     56e5b4ed05fb3bad51c226ec5b567871a45e0bce (diff)
parent     4fad103187db9095afc8eab90b5f5544feb1d19b (diff)
Merge pull request #333 in HDFFV/hdf5 from merge_page_buffering_07 to develop
* commit '4fad103187db9095afc8eab90b5f5544feb1d19b':
  Bring changes to I/O parameters from page_buffering branch.
  Merge in reentrancy changes to "make space in cache" from page_buffering branch.
  Minor cleanups and bring over "prefetched dirty" fixes for entries loaded from a cache image.
  Remove some usage of the "prefetched_dirty" flag (which hasn't been merged from the page_buffering branch yet). Also, bring over improvements to flushing candidate entries for parallel code.
  Align with incoming page buffering changes: minor cleanups, centralize removing entries from the collective metadata read list.
-rw-r--r--  src/H5AC.c              |    9
-rw-r--r--  src/H5ACdbg.c           |    2
-rw-r--r--  src/H5ACprivate.h       |    2
-rw-r--r--  src/H5C.c               |  126
-rw-r--r--  src/H5Cdbg.c            |  106
-rw-r--r--  src/H5Cimage.c          |  134
-rw-r--r--  src/H5Cmpio.c           |  990
-rw-r--r--  src/H5Cpkg.h            |   30
-rw-r--r--  src/H5Cprivate.h        |   58
-rw-r--r--  src/H5Ctag.c            |    2
-rw-r--r--  src/H5F.c               |   24
-rw-r--r--  src/H5FD.c              |   42
-rw-r--r--  src/H5FDint.c           |   59
-rw-r--r--  src/H5FDprivate.h       |   32
-rw-r--r--  src/H5Faccum.c          |   88
-rw-r--r--  src/H5Fint.c            |  134
-rw-r--r--  src/H5Fio.c             |   64
-rw-r--r--  src/H5Fmount.c          |   10
-rw-r--r--  src/H5Fpkg.h            |   22
-rw-r--r--  src/H5Fprivate.h        |   12
-rw-r--r--  src/H5Fsuper.c          |   51
-rw-r--r--  src/H5MF.c              |   16
-rw-r--r--  src/H5Z.c               |    2
-rw-r--r--  test/accum.c            |   59
-rw-r--r--  test/cache.c            |  102
-rw-r--r--  test/cache_image.c      |  649
-rw-r--r--  test/cache_tagging.c    |   58
-rw-r--r--  test/evict_on_close.c   |    1
28 files changed, 2060 insertions, 824 deletions
diff --git a/src/H5AC.c b/src/H5AC.c
index ee68a6f..2fb7992 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -501,7 +501,7 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co
/* Turn on metadata cache logging, if being used */
if(H5F_USE_MDC_LOGGING(f)) {
if(H5C_set_up_logging(f->shared->cache, H5F_MDC_LOG_LOCATION(f), H5F_START_MDC_LOG_ON_ACCESS(f)) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "mdc logging setup failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINIT, FAIL, "mdc logging setup failed")
/* Write the log header regardless of current logging status */
if(H5AC__write_create_cache_log_msg(f->shared->cache) < 0)
@@ -510,9 +510,9 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co
/* Set the cache parameters */
if(H5AC_set_cache_auto_resize_config(f->shared->cache, config_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "auto resize configuration failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "auto resize configuration failed")
- /* don't need to get the current H5C image config here since the
+ /* Don't need to get the current H5C image config here since the
* cache has just been created, and thus f->shared->cache->image_ctl
* must still set to its initial value (H5C__DEFAULT_CACHE_IMAGE_CTL).
* Note that this not true as soon as control returns to the application
@@ -522,9 +522,8 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co
int_ci_config.generate_image = image_config_ptr->generate_image;
int_ci_config.save_resize_status = image_config_ptr->save_resize_status;
int_ci_config.entry_ageout = image_config_ptr->entry_ageout;
-
if(H5C_set_cache_image_config(f, f->shared->cache, &int_ci_config) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "auto resize configuration failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "auto resize configuration failed")
done:
#ifdef H5_HAVE_PARALLEL
diff --git a/src/H5ACdbg.c b/src/H5ACdbg.c
index 8ca5102..6073288 100644
--- a/src/H5ACdbg.c
+++ b/src/H5ACdbg.c
@@ -101,6 +101,7 @@ H5AC_stats(const H5F_t *f)
FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5AC_stats() */
+#ifndef NDEBUG
/*-------------------------------------------------------------------------
* Function: H5AC_dump_cache
@@ -133,6 +134,7 @@ H5AC_dump_cache(const H5F_t *f)
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_dump_cache() */
+#endif /* NDEBUG */
/*-------------------------------------------------------------------------
diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h
index 1fe6456..1dc8270 100644
--- a/src/H5ACprivate.h
+++ b/src/H5ACprivate.h
@@ -482,8 +482,8 @@ H5_DLL herr_t H5AC_add_candidate(H5AC_t * cache_ptr, haddr_t addr);
/* Debugging functions */
H5_DLL herr_t H5AC_stats(const H5F_t *f);
-H5_DLL herr_t H5AC_dump_cache(const H5F_t *f);
#ifndef NDEBUG
+H5_DLL herr_t H5AC_dump_cache(const H5F_t *f);
H5_DLL herr_t H5AC_get_entry_ptr_from_addr(const H5F_t *f, haddr_t addr,
void **entry_ptr_ptr);
H5_DLL herr_t H5AC_flush_dependency_exists(H5F_t *f, haddr_t parent_addr,
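For context, a minimal standalone C sketch of the pattern applied to H5AC_dump_cache in the two hunks above: the debugging helper is declared and defined only when NDEBUG is not set, so release builds cannot reference it at all. The cache_t type and dump_cache() name below are simplified stand-ins, not HDF5 code.

    #include <stdio.h>

    typedef struct cache_t { int n_entries; } cache_t;

    #ifndef NDEBUG
    /* Declared and defined only when asserts are enabled */
    static void dump_cache(const cache_t *cache)
    {
        fprintf(stdout, "cache has %d entries\n", cache->n_entries);
    }
    #endif /* NDEBUG */

    int main(void)
    {
        cache_t cache = { 42 };

    #ifndef NDEBUG
        dump_cache(&cache);   /* compiled out of release (-DNDEBUG) builds */
    #endif /* NDEBUG */

        return 0;
    }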
diff --git a/src/H5C.c b/src/H5C.c
index 805b4f5..1759292 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -386,6 +386,7 @@ H5C_create(size_t max_cache_size,
cache_ptr->cache_full = FALSE;
cache_ptr->size_decreased = FALSE;
cache_ptr->resize_in_progress = FALSE;
+ cache_ptr->msic_in_progress = FALSE;
(cache_ptr->resize_ctl).version = H5C__CURR_AUTO_SIZE_CTL_VER;
(cache_ptr->resize_ctl).rpt_fcn = NULL;
@@ -925,12 +926,6 @@ H5C_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type,
HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is protected")
if(entry_ptr->is_pinned)
HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is pinned")
-#ifdef H5_HAVE_PARALLEL
- if(entry_ptr->coll_access) {
- entry_ptr->coll_access = FALSE;
- H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
/* If we get this far, call H5C__flush_single_entry() with the
* H5C__FLUSH_INVALIDATE_FLAG and the H5C__FLUSH_CLEAR_ONLY_FLAG.
@@ -1465,10 +1460,15 @@ H5C_insert_entry(H5F_t * f,
entry_ptr->prefetched = FALSE;
entry_ptr->prefetch_type_id = 0;
entry_ptr->age = 0;
+ entry_ptr->prefetched_dirty = FALSE;
#ifndef NDEBUG /* debugging field */
entry_ptr->serialization_count = 0;
#endif /* NDEBUG */
+ entry_ptr->tl_next = NULL;
+ entry_ptr->tl_prev = NULL;
+ entry_ptr->tag_info = NULL;
+
/* Apply tag to newly inserted entry */
if(H5C__tag_entry(cache_ptr, entry_ptr, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry")
@@ -2437,7 +2437,7 @@ H5C_protect(H5F_t * f,
marked as collective, and is clean, it is possible that
other processes will not have it in its cache and will
expect a bcast of the entry from process 0. So process 0
- will bcast the entry to all other ranks. Ranks that do have
+ will bcast the entry to all other ranks. Ranks that _do_ have
the entry in their cache still have to participate in the
bcast. */
#ifdef H5_HAVE_PARALLEL
@@ -4586,7 +4586,7 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
( (entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID ) &&
( bytes_evicted < eviction_size_limit ) )
{
- hbool_t corked = FALSE;
+ hbool_t skipping_entry = FALSE;
HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert( ! (entry_ptr->is_protected) );
@@ -4600,9 +4600,11 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
prev_is_dirty = prev_ptr->is_dirty;
if(entry_ptr->is_dirty ) {
+ HDassert(!entry_ptr->prefetched_dirty);
+
/* dirty corked entry is skipped */
if(entry_ptr->tag_info && entry_ptr->tag_info->corked)
- corked = TRUE;
+ skipping_entry = TRUE;
else {
/* reset entries_removed_counter and
* last_entry_removed_ptr prior to the call to
@@ -4622,16 +4624,22 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
restart_scan = TRUE;
} /* end else */
} /* end if */
- else {
+ else if(!entry_ptr->prefetched_dirty) {
bytes_evicted += entry_ptr->size;
if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0 )
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
- }
+ } /* end else-if */
+ else {
+ HDassert(!entry_ptr->is_dirty);
+ HDassert(entry_ptr->prefetched_dirty);
+
+ skipping_entry = TRUE;
+ } /* end else */
if(prev_ptr != NULL) {
- if(corked) /* dirty corked entry is skipped */
+ if(skipping_entry)
entry_ptr = prev_ptr;
else if(restart_scan || (prev_ptr->is_dirty != prev_is_dirty)
|| (prev_ptr->next != next_ptr)
@@ -4691,10 +4699,10 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
prev_ptr = entry_ptr->prev;
- if(!(entry_ptr->is_dirty)) {
+ if(!(entry_ptr->is_dirty) && !(entry_ptr->prefetched_dirty))
if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush clean entry")
- } /* end if */
+
/* just skip the entry if it is dirty, as we can't do
* anything with it now since we can't write.
*
@@ -6033,10 +6041,6 @@ H5C__flush_single_entry(H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
else
destroy_entry = destroy;
-#ifdef H5_HAVE_PARALLEL
- HDassert(FALSE == entry_ptr->coll_access);
-#endif
-
/* we will write the entry to disk if it exists, is dirty, and if the
* clear only flag is not set.
*/
@@ -6232,9 +6236,11 @@ H5C__flush_single_entry(H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
*
* 2) Delete it from the skip list if requested.
*
- * 3) Update the replacement policy for eviction
+ * 3) Delete it from the collective read access list.
*
- * 4) Remove it from the tag list for this object
+ * 4) Update the replacement policy for eviction
+ *
+ * 5) Remove it from the tag list for this object
*
* Finally, if the destroy_entry flag is set, discard the
* entry.
@@ -6244,6 +6250,14 @@ H5C__flush_single_entry(H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
if(entry_ptr->in_slist && del_from_slist_on_destroy)
H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush)
+#ifdef H5_HAVE_PARALLEL
+ /* Check for collective read access flag */
+ if(entry_ptr->coll_access) {
+ entry_ptr->coll_access = FALSE;
+ H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
+
H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL)
/* Remove entry from tag list */
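For context, a minimal standalone C sketch of the centralization shown in the hunk above: removal from the collective read access list now happens once, in the single flush/destroy path, instead of at each call site. All names here (coll_list_remove, evict_entry, and the structs) are hypothetical, not HDF5 code.

    #include <stdbool.h>
    #include <stddef.h>

    struct entry {
        bool          coll_access;   /* on the collective read list */
        struct entry *coll_next;
        struct entry *coll_prev;
    };

    struct cache { struct entry *coll_head; };

    static void coll_list_remove(struct cache *c, struct entry *e)
    {
        if (e->coll_prev) e->coll_prev->coll_next = e->coll_next;
        else              c->coll_head            = e->coll_next;
        if (e->coll_next) e->coll_next->coll_prev = e->coll_prev;
        e->coll_next = e->coll_prev = NULL;
    }

    /* Single eviction path: the one place the list removal happens */
    static void evict_entry(struct cache *c, struct entry *e)
    {
        if (e->coll_access) {        /* centralized check */
            e->coll_access = false;
            coll_list_remove(c, e);
        }
        /* ... update replacement policy, tag list, free the entry ... */
    }

    int main(void)
    {
        struct entry e = { true, NULL, NULL };
        struct cache c = { &e };

        evict_entry(&c, &e);
        return (c.coll_head == NULL) ? 0 : 1;
    }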
@@ -6290,7 +6304,8 @@ H5C__flush_single_entry(H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag cleared")
/* Propagate the clean flag up the flush dependency chain if appropriate */
- HDassert(entry_ptr->flush_dep_ndirty_children == 0);
+ if(entry_ptr->flush_dep_ndirty_children != 0)
+ HDassert(entry_ptr->flush_dep_ndirty_children == 0);
if(entry_ptr->flush_dep_nparents > 0)
if(H5C__mark_flush_dep_clean(entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep clean flag")
@@ -6822,10 +6837,15 @@ H5C_load_entry(H5F_t * f,
entry->prefetched = FALSE;
entry->prefetch_type_id = 0;
entry->age = 0;
+ entry->prefetched_dirty = FALSE;
#ifndef NDEBUG /* debugging field */
entry->serialization_count = 0;
#endif /* NDEBUG */
+ entry->tl_next = NULL;
+ entry->tl_prev = NULL;
+ entry->tag_info = NULL;
+
H5C__RESET_CACHE_ENTRY_STATS(entry);
ret_value = thing;
@@ -6864,13 +6884,6 @@ done:
* Thus the function simply does its best, returning success
* unless an error is encountered.
*
- * The primary_dxpl_id and secondary_dxpl_id parameters
- * specify the dxpl_ids used on the first write occasioned
- * by the call (primary_dxpl_id), and on all subsequent
- * writes (secondary_dxpl_id). This is useful in the metadata
- * cache, but may not be needed elsewhere. If so, just use the
- * same dxpl_id for both parameters.
- *
* Observe that this function cannot occasion a read.
*
* Return: Non-negative on success/Negative on failure.
@@ -6886,11 +6899,13 @@ H5C__make_space_in_cache(H5F_t *f, hid_t dxpl_id, size_t space_needed,
H5C_t * cache_ptr = f->shared->cache;
#if H5C_COLLECT_CACHE_STATS
int32_t clean_entries_skipped = 0;
+ int32_t dirty_pf_entries_skipped = 0;
int32_t total_entries_scanned = 0;
#endif /* H5C_COLLECT_CACHE_STATS */
uint32_t entries_examined = 0;
uint32_t initial_list_len;
size_t empty_space;
+ hbool_t reentrant_call = FALSE;
hbool_t prev_is_dirty = FALSE;
hbool_t didnt_flush_entry = FALSE;
hbool_t restart_scan;
@@ -6908,6 +6923,18 @@ H5C__make_space_in_cache(H5F_t *f, hid_t dxpl_id, size_t space_needed,
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size));
+ /* Check to see if cache_ptr->msic_in_progress is TRUE. If it is, this
+ * is a re-entrant call via a client callback invoked during the make
+ * space in cache process. To avoid infinite recursion, set
+ * reentrant_call to TRUE, and goto done.
+ */
+ if(cache_ptr->msic_in_progress) {
+ reentrant_call = TRUE;
+ HGOTO_DONE(SUCCEED);
+ } /* end if */
+
+ cache_ptr->msic_in_progress = TRUE;
+
if ( write_permitted ) {
restart_scan = FALSE;
initial_list_len = cache_ptr->LRU_list_len;
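For context, a minimal standalone C sketch of the re-entrancy guard added to H5C__make_space_in_cache in the hunk above: a flag marks the routine as active, a nested call from a client callback returns immediately instead of recursing, and only the outermost call clears the flag on exit. The names below are simplified stand-ins, not HDF5 code.

    #include <assert.h>
    #include <stdbool.h>

    struct cache {
        bool msic_in_progress;   /* set while make_space() is active */
    };

    static int make_space(struct cache *cache)
    {
        bool reentrant_call = false;
        int  ret_value = 0;

        if (cache->msic_in_progress) {
            /* Nested call from a callback: do nothing and report success */
            reentrant_call = true;
            goto done;
        }
        cache->msic_in_progress = true;

        /* ... evict / flush entries here; callbacks may call make_space() ... */

    done:
        assert(cache->msic_in_progress);
        if (!reentrant_call)
            cache->msic_in_progress = false;  /* only the outermost call clears it */
        return ret_value;
    }

    int main(void)
    {
        struct cache c = { false };
        return make_space(&c);
    }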
@@ -6954,7 +6981,8 @@ H5C__make_space_in_cache(H5F_t *f, hid_t dxpl_id, size_t space_needed,
didnt_flush_entry = TRUE;
} else if ( ( (entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID ) &&
- ( ! entry_ptr->flush_in_progress ) ) {
+ ( ! entry_ptr->flush_in_progress ) &&
+ ( ! entry_ptr->prefetched_dirty ) ) {
didnt_flush_entry = FALSE;
@@ -6980,13 +7008,6 @@ H5C__make_space_in_cache(H5F_t *f, hid_t dxpl_id, size_t space_needed,
cache_ptr->entries_removed_counter = 0;
cache_ptr->last_entry_removed_ptr = NULL;
-#ifdef H5_HAVE_PARALLEL
- if(TRUE == entry_ptr->coll_access) {
- entry_ptr->coll_access = FALSE;
- H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
- } /* end if */
-#endif
-
if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
@@ -7020,10 +7041,16 @@ H5C__make_space_in_cache(H5F_t *f, hid_t dxpl_id, size_t space_needed,
} else {
- /* Skip epoch markers and entries that are in the process
- * of being flushed.
+ /* Skip epoch markers, entries that are in the process
+ * of being flushed, and entries marked as prefetched_dirty
+ * (occurs in the R/O case only).
*/
didnt_flush_entry = TRUE;
+
+#if H5C_COLLECT_CACHE_STATS
+ if(entry_ptr->prefetched_dirty)
+ dirty_pf_entries_skipped++;
+#endif /* H5C_COLLECT_CACHE_STATS */
}
if ( prev_ptr != NULL ) {
@@ -7088,6 +7115,7 @@ H5C__make_space_in_cache(H5F_t *f, hid_t dxpl_id, size_t space_needed,
cache_ptr->calls_to_msic++;
cache_ptr->total_entries_skipped_in_msic += clean_entries_skipped;
+ cache_ptr->total_dirty_pf_entries_skipped_in_msic += dirty_pf_entries_skipped;
cache_ptr->total_entries_scanned_in_msic += total_entries_scanned;
if ( clean_entries_skipped > cache_ptr->max_entries_skipped_in_msic ) {
@@ -7095,6 +7123,9 @@ H5C__make_space_in_cache(H5F_t *f, hid_t dxpl_id, size_t space_needed,
cache_ptr->max_entries_skipped_in_msic = clean_entries_skipped;
}
+ if(dirty_pf_entries_skipped > cache_ptr->max_dirty_pf_entries_skipped_in_msic)
+ cache_ptr->max_dirty_pf_entries_skipped_in_msic = dirty_pf_entries_skipped;
+
if ( total_entries_scanned > cache_ptr->max_entries_scanned_in_msic ) {
cache_ptr->max_entries_scanned_in_msic = total_entries_scanned;
@@ -7163,6 +7194,12 @@ H5C__make_space_in_cache(H5F_t *f, hid_t dxpl_id, size_t space_needed,
}
done:
+ /* Sanity checks */
+ HDassert(cache_ptr->msic_in_progress);
+ if(!reentrant_call)
+ cache_ptr->msic_in_progress = FALSE;
+ HDassert((!reentrant_call) || (cache_ptr->msic_in_progress));
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__make_space_in_cache() */
@@ -8723,12 +8760,21 @@ H5C_remove_entry(void *_entry)
/* Update the cache internal data structures as appropriate for a destroy.
* Specifically:
* 1) Delete it from the index
- * 2) Update the replacement policy for eviction
- * 3) Remove it from the tag list for this object
+ * 2) Delete it from the collective read access list
+ * 3) Update the replacement policy for eviction
+ * 4) Remove it from the tag list for this object
*/
H5C__DELETE_FROM_INDEX(cache, entry, FAIL)
+#ifdef H5_HAVE_PARALLEL
+ /* Check for collective read access flag */
+ if(entry->coll_access) {
+ entry->coll_access = FALSE;
+ H5C__REMOVE_FROM_COLL_LIST(cache, entry, FAIL)
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
+
H5C__UPDATE_RP_FOR_EVICTION(cache, entry, FAIL)
/* Remove entry from tag list */
diff --git a/src/H5Cdbg.c b/src/H5Cdbg.c
index eb5f123..a955eaf 100644
--- a/src/H5Cdbg.c
+++ b/src/H5Cdbg.c
@@ -70,6 +70,7 @@
/*******************/
+#ifndef NDEBUG
/*-------------------------------------------------------------------------
* Function: H5C_dump_cache
@@ -175,6 +176,85 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_dump_cache() */
+#endif /* NDEBUG */
+
+#ifndef NDEBUG
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_dump_cache_LRU
+ *
+ * Purpose: Print a summary of the contents of the metadata cache
+ * LRU for debugging purposes.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 10/10/10
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_dump_cache_LRU(H5C_t *cache_ptr, const char *cache_name)
+{
+ H5C_cache_entry_t * entry_ptr;
+ int i = 0;
+
+ FUNC_ENTER_NOAPI_NOERR
+
+ /* Sanity check */
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_name != NULL );
+
+ HDfprintf(stdout, "\n\nDump of metadata cache LRU \"%s\"\n", cache_name);
+ HDfprintf(stdout, "LRU len = %d, LRU size = %d\n",
+ cache_ptr->LRU_list_len, (int)(cache_ptr->LRU_list_size));
+ HDfprintf(stdout, "index_size = %d, max_cache_size = %d, delta = %d\n\n",
+ (int)(cache_ptr->index_size), (int)(cache_ptr->max_cache_size),
+ (int)(cache_ptr->max_cache_size) - (int)(cache_ptr->index_size));
+
+ /* Print header */
+ HDfprintf(stdout, "Entry ");
+ HDfprintf(stdout, "| Address ");
+ HDfprintf(stdout, "| Tag ");
+ HDfprintf(stdout, "| Size ");
+ HDfprintf(stdout, "| Ring ");
+ HDfprintf(stdout, "| Type ");
+ HDfprintf(stdout, "| Dirty");
+ HDfprintf(stdout, "\n");
+
+ HDfprintf(stdout, "----------------------------------------------------------------------------------------------------------------\n");
+
+ entry_ptr = cache_ptr->LRU_head_ptr;
+ while(entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+
+ /* Print entry */
+ HDfprintf(stdout, "%s%5d ", cache_ptr->prefix, i);
+ HDfprintf(stdout, " 0x%16llx ", (long long)(entry_ptr->addr));
+
+ if(NULL == entry_ptr->tag_info)
+ HDfprintf(stdout, " %16s ", "N/A");
+ else
+ HDfprintf(stdout, " 0x%16llx ",
+ (long long)(entry_ptr->tag_info->tag));
+
+ HDfprintf(stdout, " %5lld ", (long long)(entry_ptr->size));
+ HDfprintf(stdout, " %d ", (int)(entry_ptr->ring));
+ HDfprintf(stdout, " %2d %-32s ", (int)(entry_ptr->type->id),
+ (entry_ptr->type->name));
+ HDfprintf(stdout, " %d", (int)(entry_ptr->is_dirty));
+ HDfprintf(stdout, "\n");
+
+ i++;
+ entry_ptr = entry_ptr->next;
+ } /* end while */
+
+ HDfprintf(stdout, "----------------------------------------------------------------------------------------------------------------\n");
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5C_dump_cache_LRU() */
+#endif /* NDEBUG */
/*-------------------------------------------------------------------------
@@ -385,6 +465,7 @@ H5C_stats(H5C_t * cache_ptr,
double average_successful_search_depth = 0.0f;
double average_failed_search_depth = 0.0f;
double average_entries_skipped_per_calls_to_msic = 0.0f;
+ double average_dirty_pf_entries_skipped_per_call_to_msic = 0.0f;
double average_entries_scanned_per_calls_to_msic = 0.0f;
#endif /* H5C_COLLECT_CACHE_STATS */
herr_t ret_value = SUCCEED; /* Return value */
@@ -620,6 +701,17 @@ H5C_stats(H5C_t * cache_ptr,
(long)(cache_ptr->max_entries_skipped_in_msic));
if(cache_ptr->calls_to_msic > 0)
+ average_dirty_pf_entries_skipped_per_call_to_msic =
+ (((double)(cache_ptr->total_dirty_pf_entries_skipped_in_msic)) /
+ ((double)(cache_ptr->calls_to_msic)));
+
+ HDfprintf(stdout,
+ "%s MSIC: Average/max dirty pf entries skipped = %lf / %ld\n",
+ cache_ptr->prefix,
+ average_dirty_pf_entries_skipped_per_call_to_msic,
+ (long)(cache_ptr->max_dirty_pf_entries_skipped_in_msic));
+
+ if(cache_ptr->calls_to_msic > 0)
average_entries_scanned_per_calls_to_msic =
(((double)(cache_ptr->total_entries_scanned_in_msic)) /
((double)(cache_ptr->calls_to_msic)));
@@ -887,12 +979,14 @@ H5C_stats__reset(H5C_t H5_ATTR_UNUSED * cache_ptr)
cache_ptr->max_pel_len = 0;
cache_ptr->max_pel_size = (size_t)0;
- cache_ptr->calls_to_msic = 0;
- cache_ptr->total_entries_skipped_in_msic = 0;
- cache_ptr->total_entries_scanned_in_msic = 0;
- cache_ptr->max_entries_skipped_in_msic = 0;
- cache_ptr->max_entries_scanned_in_msic = 0;
- cache_ptr->entries_scanned_to_make_space = 0;
+ cache_ptr->calls_to_msic = 0;
+ cache_ptr->total_entries_skipped_in_msic = 0;
+ cache_ptr->total_dirty_pf_entries_skipped_in_msic = 0;
+ cache_ptr->total_entries_scanned_in_msic = 0;
+ cache_ptr->max_entries_skipped_in_msic = 0;
+ cache_ptr->max_dirty_pf_entries_skipped_in_msic = 0;
+ cache_ptr->max_entries_scanned_in_msic = 0;
+ cache_ptr->entries_scanned_to_make_space = 0;
cache_ptr->slist_scan_restarts = 0;
cache_ptr->LRU_scan_restarts = 0;
diff --git a/src/H5Cimage.c b/src/H5Cimage.c
index 1da2545..3a21137 100644
--- a/src/H5Cimage.c
+++ b/src/H5Cimage.c
@@ -489,7 +489,6 @@ H5C__deserialize_prefetched_entry(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr,
*/
HDassert(!((type->flags & H5C__CLASS_SKIP_READS) &&
(type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)));
-
HDassert(H5F_addr_defined(addr));
HDassert(type->get_initial_load_size);
HDassert(type->deserialize);
@@ -498,6 +497,7 @@ H5C__deserialize_prefetched_entry(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr,
* relationships now. The client will restore the relationship(s) with
* the deserialized entry if appropriate.
*/
+ HDassert(pf_entry_ptr->fd_parent_count == pf_entry_ptr->flush_dep_nparents);
for(i = (int)(pf_entry_ptr->fd_parent_count) - 1; i >= 0; i--) {
HDassert(pf_entry_ptr->flush_dep_parent);
HDassert(pf_entry_ptr->flush_dep_parent[i]);
@@ -525,7 +525,7 @@ H5C__deserialize_prefetched_entry(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr,
*/
if(pf_entry_ptr->fd_child_count > 0) {
if(NULL == (fd_children = (H5C_cache_entry_t **)H5MM_calloc(sizeof(H5C_cache_entry_t **) * (size_t)(pf_entry_ptr->fd_child_count + 1))))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for fd child ptr array")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for fd child ptr array")
if(H5C__destroy_pf_entry_child_flush_deps(cache_ptr, pf_entry_ptr, fd_children) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "can't destroy pf entry child flush dependency(s).")
@@ -544,7 +544,6 @@ H5C__deserialize_prefetched_entry(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr,
*/
if(NULL == (thing = type->deserialize(pf_entry_ptr->image_ptr, len, udata, &dirty)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "Can't deserialize image")
-
ds_entry_ptr = (H5C_cache_entry_t *)thing;
/* In general, an entry should be clean just after it is loaded.
@@ -583,8 +582,7 @@ H5C__deserialize_prefetched_entry(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr,
ds_entry_ptr->image_ptr = pf_entry_ptr->image_ptr;
ds_entry_ptr->image_up_to_date = !dirty;
ds_entry_ptr->type = type;
- ds_entry_ptr->is_dirty = dirty |
- pf_entry_ptr->is_dirty;
+ ds_entry_ptr->is_dirty = dirty | pf_entry_ptr->is_dirty;
ds_entry_ptr->dirtied = FALSE;
ds_entry_ptr->is_protected = FALSE;
ds_entry_ptr->is_read_only = FALSE;
@@ -626,7 +624,7 @@ H5C__deserialize_prefetched_entry(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr,
pf_entry_ptr->coll_prev = NULL;
#endif /* H5_HAVE_PARALLEL */
- /* initialize cache image related fields */
+ /* Initialize cache image related fields */
ds_entry_ptr->include_in_image = FALSE;
ds_entry_ptr->lru_rank = 0;
ds_entry_ptr->image_dirty = FALSE;
@@ -638,6 +636,10 @@ H5C__deserialize_prefetched_entry(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr,
ds_entry_ptr->prefetched = FALSE;
ds_entry_ptr->prefetch_type_id = 0;
ds_entry_ptr->age = 0;
+ ds_entry_ptr->prefetched_dirty = pf_entry_ptr->prefetched_dirty;
+#ifndef NDEBUG /* debugging field */
+ ds_entry_ptr->serialization_count = 0;
+#endif /* NDEBUG */
H5C__RESET_CACHE_ENTRY_STATS(ds_entry_ptr);
@@ -666,7 +668,7 @@ H5C__deserialize_prefetched_entry(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr,
* and H5C__FLUSH_CLEAR_ONLY_FLAG flags set.
*/
pf_entry_ptr->image_ptr = NULL;
- if ( pf_entry_ptr->is_dirty ) {
+ if(pf_entry_ptr->is_dirty) {
HDassert(pf_entry_ptr->in_slist);
flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;
} /* end if */
@@ -684,7 +686,6 @@ H5C__deserialize_prefetched_entry(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr,
H5C__INSERT_IN_INDEX(cache_ptr, ds_entry_ptr, FAIL)
HDassert(!ds_entry_ptr->in_slist);
-
if(ds_entry_ptr->is_dirty)
H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, ds_entry_ptr, FAIL)
@@ -699,16 +700,13 @@ H5C__deserialize_prefetched_entry(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr,
(ds_entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_LOAD, ds_entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry loaded into cache")
- /* restore flush dependencies with the flush dependency children of
+ /* Restore flush dependencies with the flush dependency children of
* of the prefetched entry. Note that we must protect *ds_entry_ptr
* before the call to avoid triggering sanity check failures, and
* then unprotect it afterwards.
*/
i = 0;
if(fd_children != NULL) {
- int j;
- hbool_t found;
-
H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, ds_entry_ptr, FAIL)
ds_entry_ptr->is_protected = TRUE;
while(fd_children[i] != NULL) {
@@ -718,15 +716,22 @@ H5C__deserialize_prefetched_entry(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr,
HDassert((fd_children[i])->fd_parent_count > 0);
HDassert((fd_children[i])->fd_parent_addrs);
- j = 0;
- found = FALSE;
- while((j < (int)((fd_children[i])->fd_parent_count)) && (!found)) {
- if((fd_children[i])->fd_parent_addrs[j] == ds_entry_ptr->addr)
- found = TRUE;
+#ifndef NDEBUG
+ {
+ int j;
+ hbool_t found;
- j++;
- } /* end while */
- HDassert(found);
+ j = 0;
+ found = FALSE;
+ while((j < (int)((fd_children[i])->fd_parent_count)) && (!found)) {
+ if((fd_children[i])->fd_parent_addrs[j] == ds_entry_ptr->addr)
+ found = TRUE;
+
+ j++;
+ } /* end while */
+ HDassert(found);
+ }
+#endif /* NDEBUG */
if(H5C_create_flush_dependency(ds_entry_ptr, fd_children[i]) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Can't restore child flush dependency")
@@ -735,7 +740,6 @@ H5C__deserialize_prefetched_entry(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr,
} /* end while */
H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, ds_entry_ptr, FAIL);
-
ds_entry_ptr->is_protected = FALSE;
} /* end if ( fd_children != NULL ) */
HDassert((unsigned)i == ds_entry_ptr->fd_child_count);
@@ -1262,6 +1266,26 @@ H5C__prep_image_for_file_close(H5F_t *f, hid_t dxpl_id)
HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "can't load cache image")
} /* end if */
+ /* Before we start to generate the cache image (if requested), verify
+ * that the superblock supports superblock extension messages, and
+ * silently cancel any request for a cache image if it does not.
+ *
+ * Ideally, we would do this when the cache image is requested,
+ * but the necessary information is not necessarily available at that
+ * time -- hence this last minute check.
+ *
+ * Note that under some error conditions, the superblock will be
+ * undefined in this case as well -- if so, assume that the
+ * superblock does not support superblock extension messages.
+ */
+ if((NULL == f->shared->sblock) ||
+ (f->shared->sblock->super_vers < HDF5_SUPERBLOCK_VERSION_2)) {
+ H5C_cache_image_ctl_t default_image_ctl = H5C__DEFAULT_CACHE_IMAGE_CTL;
+
+ cache_ptr->image_ctl = default_image_ctl;
+ HDassert(!(cache_ptr->image_ctl.generate_image));
+ } /* end if */
+
/* Generate the cache image, if requested */
if(cache_ptr->image_ctl.generate_image) {
/* Create the cache image super block extension message.
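For context, a minimal standalone C sketch of the last-minute check added in the hunk above: if a precondition for generating the cache image (here, a new-enough superblock) cannot be verified, the request is cancelled silently by resetting the image control to its default. All names below are hypothetical, not HDF5 code.

    #include <stdbool.h>
    #include <stddef.h>

    struct image_ctl  { bool generate_image; };
    struct superblock { int  version; };

    #define DEFAULT_IMAGE_CTL      ((struct image_ctl){ .generate_image = false })
    #define MIN_SUPERBLOCK_VERSION 2

    static void prep_image(struct image_ctl *ctl, const struct superblock *sblock)
    {
        /* Cancel the request if the superblock is missing or too old */
        if (sblock == NULL || sblock->version < MIN_SUPERBLOCK_VERSION)
            *ctl = DEFAULT_IMAGE_CTL;

        if (ctl->generate_image) {
            /* ... construct the cache image ... */
        }
    }

    int main(void)
    {
        struct image_ctl  ctl    = { .generate_image = true };
        struct superblock sblock = { .version = 1 };

        prep_image(&ctl, &sblock);   /* request is silently dropped */
        return ctl.generate_image ? 1 : 0;
    }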
@@ -1386,7 +1410,7 @@ H5C__prep_image_for_file_close(H5F_t *f, hid_t dxpl_id)
*/
if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDC_IMAGE_BLK)
if(H5C__write_cache_image_superblock_msg(f, dxpl_id, FALSE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "update of cache image SB mesg failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "update of cache image SB mesg failed")
/* At this point:
*
@@ -1440,7 +1464,7 @@ H5C__prep_image_for_file_close(H5F_t *f, hid_t dxpl_id)
*/
if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDC_IMAGE_BLK)
if(H5F_super_ext_remove_msg(f, dxpl_id, H5O_MDCI_MSG_ID) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove MDC image msg from superblock ext.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove MDC image msg from superblock ext")
cache_ptr->image_ctl.generate_image = FALSE;
} /* end else */
@@ -1458,6 +1482,18 @@ done:
* image_ctl field of *cache_ptr. Make adjustments for
* changes in configuration as required.
*
+ * If the file is open read only, silently
+ * force the cache image configuration to its default
+ * (which disables construction of a cache image).
+ *
+ * Note that in addition to being inapplicable in the
+ * read only case, cache image is also inapplicable if
+ * the superblock does not support superblock extension
+ * messages. Unfortunately, this information need not
+ * be available at this point. Thus we check for this
+ * later, in H5C_prep_for_file_close() and cancel the
+ * cache image request if appropriate.
+ *
* Fail if the new configuration is invalid.
*
* Return: SUCCEED on success, and FAIL on failure.
@@ -1483,26 +1519,13 @@ H5C_set_cache_image_config(const H5F_t *f, H5C_t *cache_ptr,
/* Check arguments */
if((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC))
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad cache_ptr on entry")
- if(config_ptr == NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "NULL config_ptr on entry")
- if(config_ptr->version != H5C__CURR_CACHE_IMAGE_CTL_VER)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Unknown config version")
- /* check general configuration section of the config: */
+ /* Validate the config: */
if(H5C_validate_cache_image_config(config_ptr) < 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "invalid cache image configuration")
- if(H5F_INTENT(f) & H5F_ACC_RDWR) /* file has been opened R/W */
- cache_ptr->image_ctl = *config_ptr;
- else { /* file opened R/O -- suppress cache image silently */
- H5C_cache_image_ctl_t default_image_ctl = H5C__DEFAULT_CACHE_IMAGE_CTL;
-
- cache_ptr->image_ctl = default_image_ctl;
- HDassert(!(cache_ptr->image_ctl.generate_image));
- } /* end else */
-
#ifdef H5_HAVE_PARALLEL
- /* the collective metadata write code is not currently compatible
+ /* The collective metadata write code is not currently compatible
* with cache image. Until this is fixed, suppress cache image silently
* if there is more than one process.
* JRM -- 11/8/16
@@ -1513,6 +1536,30 @@ H5C_set_cache_image_config(const H5F_t *f, H5C_t *cache_ptr,
cache_ptr->image_ctl = default_image_ctl;
HDassert(!(cache_ptr->image_ctl.generate_image));
} /* end if */
+ else {
+#endif /* H5_HAVE_PARALLEL */
+ /* A cache image can only be generated if the file is opened read / write
+ * and the superblock supports superblock extension messages.
+ *
+ * However, the superblock version is not available at this point --
+ * hence we can only check the former requirement now. Do the latter
+ * check just before we construct the image.
+ *
+ * If the file is opened read / write, apply the supplied configuration.
+ *
+ * If it is not, set the image configuration to the default, which has
+ * the effect of silently disabling the cache image if it was requested.
+ */
+ if(H5F_INTENT(f) & H5F_ACC_RDWR)
+ cache_ptr->image_ctl = *config_ptr;
+ else {
+ H5C_cache_image_ctl_t default_image_ctl = H5C__DEFAULT_CACHE_IMAGE_CTL;
+
+ cache_ptr->image_ctl = default_image_ctl;
+ HDassert(!(cache_ptr->image_ctl.generate_image));
+ } /* end else */
+#ifdef H5_HAVE_PARALLEL
+ } /* end else */
#endif /* H5_HAVE_PARALLEL */
done:
@@ -3186,7 +3233,13 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr,
* this as otherwise the cache will attempt to write them on file
* close. Since the file is R/O, the metadata cache image superblock
* extension message and the cache image block will not be removed.
- * Hence no danger in this.
+ * Hence no danger in this for subsequent opens.
+ *
+ * However, if the dirty entry (marked clean for purposes of the R/O
+ * file open) is evicted and then referred to, the cache will read
+ * either invalid or obsolete data from the file. Handle this by
+ * setting the prefetched_dirty field, and hiding such entries from
+ * the eviction candidate selection algorithm.
*/
pf_entry_ptr->is_dirty = (is_dirty && file_is_rw);
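For context, a minimal standalone C sketch of the prefetched_dirty handling described in the comment above: an entry that was dirty in the cache image but comes from a read-only open is marked clean for bookkeeping, flagged prefetched_dirty, and excluded from eviction so stale data is never re-read from the file. The names below are simplified stand-ins, not HDF5 code.

    #include <stdbool.h>

    struct entry {
        bool is_dirty;          /* dirty in the running cache          */
        bool prefetched_dirty;  /* was dirty in the image, file is R/O */
    };

    static void reconstruct_entry(struct entry *e, bool image_dirty, bool file_is_rw)
    {
        e->is_dirty         = image_dirty && file_is_rw;
        e->prefetched_dirty = image_dirty && !file_is_rw;
    }

    static bool evictable(const struct entry *e)
    {
        /* Clean entries are eviction candidates unless they hide
         * unwritable dirty data from a read-only open.
         */
        return !e->is_dirty && !e->prefetched_dirty;
    }

    int main(void)
    {
        struct entry e;

        reconstruct_entry(&e, /*image_dirty=*/true, /*file_is_rw=*/false);
        return evictable(&e) ? 1 : 0;   /* 0: entry is hidden from eviction */
    }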
@@ -3204,6 +3257,8 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr,
/* Decode dirty dependency child count */
UINT16DECODE(p, pf_entry_ptr->fd_dirty_child_count);
+ if(!file_is_rw)
+ pf_entry_ptr->fd_dirty_child_count = 0;
if(pf_entry_ptr->fd_dirty_child_count > pf_entry_ptr->fd_child_count)
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid dirty flush dependency child count")
@@ -3263,6 +3318,7 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr,
pf_entry_ptr->image_up_to_date = TRUE;
pf_entry_ptr->type = H5AC_PREFETCHED_ENTRY;
pf_entry_ptr->prefetched = TRUE;
+ pf_entry_ptr->prefetched_dirty = is_dirty && (!file_is_rw);
/* Sanity checks */
HDassert(pf_entry_ptr->size > 0 && pf_entry_ptr->size < H5C_MAX_ENTRY_SIZE);
diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c
index ebb98b3..1f43866 100644
--- a/src/H5Cmpio.c
+++ b/src/H5Cmpio.c
@@ -65,6 +65,11 @@
/* Local Prototypes */
/********************/
static herr_t H5C__collective_write(H5F_t *f, hid_t dxpl_id);
+static herr_t H5C__flush_candidate_entries(H5F_t *f, hid_t dxpl_id,
+ unsigned entries_to_flush[H5C_RING_NTYPES],
+ unsigned entries_to_clear[H5C_RING_NTYPES]);
+static herr_t H5C__flush_candidates_in_ring(H5F_t *f, hid_t dxpl_id,
+ H5C_ring_t ring, unsigned entries_to_flush, unsigned entries_to_clear);
/*********************/
@@ -175,31 +180,18 @@ H5C_apply_candidate_list(H5F_t * f,
int mpi_rank,
int mpi_size)
{
- hbool_t restart_scan;
- hbool_t prev_is_dirty;
int i;
int m;
int n;
unsigned first_entry_to_flush;
unsigned last_entry_to_flush;
- unsigned entries_to_clear = 0;
- unsigned entries_to_flush = 0;
- unsigned entries_to_flush_or_clear_last = 0;
- unsigned entries_to_flush_collectively = 0;
- unsigned entries_cleared = 0;
- unsigned entries_flushed = 0;
- unsigned entries_delayed = 0;
- unsigned entries_flushed_or_cleared_last = 0;
- unsigned entries_flushed_collectively = 0;
- unsigned entries_examined = 0;
- unsigned initial_list_len;
+ unsigned total_entries_to_clear = 0;
+ unsigned total_entries_to_flush = 0;
int * candidate_assignment_table = NULL;
+ unsigned entries_to_flush[H5C_RING_NTYPES];
+ unsigned entries_to_clear[H5C_RING_NTYPES];
haddr_t addr;
- H5C_cache_entry_t * clear_ptr = NULL;
- H5C_cache_entry_t * next_ptr = NULL;
H5C_cache_entry_t * entry_ptr = NULL;
- H5C_cache_entry_t * flush_ptr = NULL;
- H5C_cache_entry_t * delayed_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
haddr_t last_addr;
#endif /* H5C_DO_SANITY_CHECKS */
@@ -220,6 +212,10 @@ H5C_apply_candidate_list(H5F_t * f,
HDassert(0 <= mpi_rank);
HDassert(mpi_rank < mpi_size);
+ /* Initialize the entries_to_flush and entries_to_clear arrays */
+ HDmemset(entries_to_flush, 0, sizeof(entries_to_flush));
+ HDmemset(entries_to_clear, 0, sizeof(entries_to_clear));
+
#if H5C_APPLY_CANDIDATE_LIST__DEBUG
HDfprintf(stdout, "%s:%d: setting up candidate assignment table.\n", FUNC, mpi_rank);
@@ -330,17 +326,26 @@ H5C_apply_candidate_list(H5F_t * f,
*/
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry is protected?!?!?")
+ /* Sanity checks */
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring >= H5C_RING_USER);
+ HDassert(entry_ptr->ring <= H5C_RING_SB);
+ HDassert(!entry_ptr->flush_immediately);
+ HDassert(!entry_ptr->clear_on_unprotect);
+
/* Determine whether the entry is to be cleared or flushed,
* and mark it accordingly. We will scan the protected and
* pinned list shortly, and clear or flush according to these
* markings.
*/
if(u >= first_entry_to_flush && u <= last_entry_to_flush) {
- entries_to_flush++;
+ total_entries_to_flush++;
+ entries_to_flush[entry_ptr->ring]++;
entry_ptr->flush_immediately = TRUE;
} /* end if */
else {
- entries_to_clear++;
+ total_entries_to_clear++;
+ entries_to_clear[entry_ptr->ring]++;
entry_ptr->clear_on_unprotect = TRUE;
} /* end else */
@@ -356,372 +361,36 @@ H5C_apply_candidate_list(H5F_t * f,
} /* end if */
} /* end for */
+#if H5C_DO_SANITY_CHECKS
+ m = 0;
+ n = 0;
+ for(i = 0; i < H5C_RING_NTYPES; i++) {
+ m += (int)entries_to_flush[i];
+ n += (int)entries_to_clear[i];
+ } /* end if */
+
+ HDassert((unsigned)m == total_entries_to_flush);
+ HDassert((unsigned)n == total_entries_to_clear);
+#endif /* H5C_DO_SANITY_CHECKS */
+
#if H5C_APPLY_CANDIDATE_LIST__DEBUG
HDfprintf(stdout, "%s:%d: num candidates/to clear/to flush = %u/%u/%u.\n",
- FUNC, mpi_rank, num_candidates, entries_to_clear,
- entries_to_flush);
+ FUNC, mpi_rank, num_candidates, total_entries_to_clear,
+ total_entries_to_flush);
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
/* We have now marked all the entries on the candidate list for
* either flush or clear -- now scan the LRU and the pinned list
- * for these entries and do the deed.
+ * for these entries and do the deed. Do this via a call to
+ * H5C__flush_candidate_entries().
*
* Note that we are doing things in this round about manner so as
* to preserve the order of the LRU list to the best of our ability.
* If we don't do this, my experiments indicate that we will have a
* noticably poorer hit ratio as a result.
*/
-
-#if H5C_APPLY_CANDIDATE_LIST__DEBUG
- HDfprintf(stdout, "%s:%d: scanning LRU list. len = %d.\n", FUNC, mpi_rank,
- (int)(cache_ptr->LRU_list_len));
-#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
-
- /* ===================================================================== *
- * Now scan the LRU and PEL lists, flushing or clearing entries as
- * needed.
- *
- * The flush_me_last flag may dictate how or
- * when some entries can be flushed, and should be addressed here.
- * However, in their initial implementation, these flags only apply to the
- * superblock, so there's only a relatively small change to this function
- * to account for this one case where they come into play. If these flags
- * are ever expanded upon, this function and the following flushing steps
- * should be reworked to account for additional cases.
- * ===================================================================== */
-
- HDassert(entries_to_flush >= 0);
-
- restart_scan = FALSE;
- entries_examined = 0;
- initial_list_len = cache_ptr->LRU_list_len;
- entry_ptr = cache_ptr->LRU_tail_ptr;
-
- /* Examine each entry in the LRU list */
- while ( ( entry_ptr != NULL )
- &&
- ( entries_examined <= (entries_to_flush + 1) * initial_list_len )
- &&
- ( (entries_cleared + entries_flushed) < num_candidates ) ) {
-
- if ( entry_ptr->prev != NULL )
- prev_is_dirty = entry_ptr->prev->is_dirty;
-
- /* If this process needs to clear this entry. */
- if(entry_ptr->clear_on_unprotect) {
-
- HDassert(entry_ptr->is_dirty);
-
- next_ptr = entry_ptr->next;
- entry_ptr->clear_on_unprotect = FALSE;
- clear_ptr = entry_ptr;
- entry_ptr = entry_ptr->prev;
- entries_cleared++;
-
-#if ( H5C_APPLY_CANDIDATE_LIST__DEBUG > 1 )
- HDfprintf(stdout, "%s:%d: clearing 0x%llx.\n", FUNC, mpi_rank,
- (long long)clear_ptr->addr);
-#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
-
- /* reset entries_removed_counter and
- * last_entry_removed_ptr prior to the call to
- * H5C__flush_single_entry() so that we can spot
- * unexpected removals of entries from the cache,
- * and set the restart_scan flag if proceeding
- * would be likely to cause us to scan an entry
- * that is no longer in the cache.
- *
- * Note that as of this writing (April 2015) this
- * case cannot occur in the parallel case. However
- * Quincey is making noises about changing this, hence
- * the insertion of this test.
- *
- * Note also that there is no test code to verify
- * that this code actually works (although similar code
- * in the serial version exists and is tested).
- *
- * Implementing a test will likely require implementing
- * flush op like facilities in the parallel tests. At
- * a guess this will not be terribly painful, but it
- * will take a bit of time.
- */
- cache_ptr->entries_removed_counter = 0;
- cache_ptr->last_entry_removed_ptr = NULL;
-
- if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
-
- if((cache_ptr->entries_removed_counter > 1) ||
- (cache_ptr->last_entry_removed_ptr == entry_ptr))
- restart_scan = TRUE;
- } /* end if */
-
- /* Else, if this process needs to flush this entry. */
- else if (entry_ptr->flush_immediately) {
-
- HDassert(entry_ptr->is_dirty);
-
- next_ptr = entry_ptr->next;
- entry_ptr->flush_immediately = FALSE;
- flush_ptr = entry_ptr;
- entry_ptr = entry_ptr->prev;
- entries_flushed++;
-
-#if ( H5C_APPLY_CANDIDATE_LIST__DEBUG > 1 )
- HDfprintf(stdout, "%s:%d: flushing 0x%llx.\n", FUNC, mpi_rank,
- (long long)flush_ptr->addr);
-#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
-
- /* reset entries_removed_counter and
- * last_entry_removed_ptr prior to the call to
- * H5C__flush_single_entry() so that we can spot
- * unexpected removals of entries from the cache,
- * and set the restart_scan flag if proceeding
- * would be likely to cause us to scan an entry
- * that is no longer in the cache.
- *
- * Note that as of this writing (April 2015) this
- * case cannot occur in the parallel case. However
- * Quincey is making noises about changing this, hence
- * the insertion of this test.
- *
- * Note also that there is no test code to verify
- * that this code actually works (although similar code
- * in the serial version exists and is tested).
- *
- * Implementing a test will likely require implementing
- * flush op like facilities in the parallel tests. At
- * a guess this will not be terribly painful, but it
- * will take a bit of time.
- */
- cache_ptr->entries_removed_counter = 0;
- cache_ptr->last_entry_removed_ptr = NULL;
-
- /* Add this entry to the list of entries to collectively write */
- if(H5C__flush_single_entry(f, dxpl_id, flush_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
-
- if((cache_ptr->entries_removed_counter > 1) ||
- (cache_ptr->last_entry_removed_ptr == entry_ptr))
- restart_scan = TRUE;
- } /* end else-if */
-
- /* Otherwise, no action to be taken on this entry. Grab the next. */
- else {
- entry_ptr = entry_ptr->prev;
-
- if ( entry_ptr != NULL )
- next_ptr = entry_ptr->next;
-
- } /* end else */
-
- if ( ( entry_ptr != NULL )
- &&
- ( ( restart_scan )
- ||
- ( entry_ptr->is_dirty != prev_is_dirty )
- ||
- ( entry_ptr->next != next_ptr )
- ||
- ( entry_ptr->is_protected )
- ||
- ( entry_ptr->is_pinned )
- )
- ) {
-
- /* something has happened to the LRU -- start over
- * from the tail.
- *
- * Recall that this code should be un-reachable at present,
- * as all the operations by entries on flush that could cause
- * it to be reachable are disallowed in the parallel case at
- * present. Hence the following assertion which should be
- * removed if the above changes.
- */
-
- HDassert( ! restart_scan );
- HDassert( entry_ptr->is_dirty == prev_is_dirty );
- HDassert( entry_ptr->next == next_ptr );
- HDassert( ! entry_ptr->is_protected );
- HDassert( ! entry_ptr->is_pinned );
-
- HDassert(FALSE); /* see comment above */
-
- restart_scan = FALSE;
- entry_ptr = cache_ptr->LRU_tail_ptr;
-/*
- H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
-*/
- }
-
- entries_examined++;
- } /* end while */
-
-#if H5C_APPLY_CANDIDATE_LIST__DEBUG
- HDfprintf(stdout, "%s:%d: entries examined/cleared/flushed = %u/%u/%u.\n",
- FUNC, mpi_rank, entries_examined,
- entries_cleared, entries_flushed);
-#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
-
- /* It is also possible that some of the cleared entries are on the
- * pinned list. Must scan that also.
- *
- * WARNING:
- *
- * As we now allow unpinning, and removal of other entries as a side
- * effect of flushing an entry, it is possible that the next entry
- * in a PEL scan could either be no longer pinned, or no longer in
- * the cache by the time we get to it.
- *
- * At present, this is not possible in this case, as we disallow such
- * operations in the parallel version of the library. However, Quincey
- * has been making noises about relaxing this. If and when he does,
- * we have a potential problem here.
- *
- * The same issue exists in the serial cache, and there are tests
- * to detect this problem when it occurs, and adjust to it. As seen
- * above in the LRU scan, I have ported such tests to the parallel
- * code where a close cognate exists in the serial code.
- *
- * I haven't done so here, as there are no PEL scans where the problem
- * can occur in the serial code. Needless to say, this will have to
- * be repaired if the constraints on pre_serialize and serialize
- * callbacks are relaxed in the parallel version of the metadata cache.
- *
- * JRM -- 4/1/15
- */
-
-#if H5C_APPLY_CANDIDATE_LIST__DEBUG
- HDfprintf(stdout, "%s:%d: scanning pinned entry list. len = %d\n",
- FUNC, mpi_rank, (int)(cache_ptr->pel_len));
-#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
-
- entry_ptr = cache_ptr->pel_head_ptr;
- while((entry_ptr != NULL) &&
- ((entries_cleared + entries_flushed + entries_delayed)
- < num_candidates)) {
-
- /* If entry is marked for flush or for clear */
- if((entry_ptr->clear_on_unprotect||entry_ptr->flush_immediately)) {
-
- /* If this entry needs to be flushed last */
- if (entry_ptr->flush_me_last) {
-
- /* At this time, only the superblock supports being
- flushed last. Conveniently, it also happens to be the only
- entry that supports being flushed collectively, as well. Also
- conveniently, it's always pinned, so we only need to check
- for it while scanning the PEL here. Finally, it's never
- included in a candidate list that excludes other dirty
- entries in a cache, so we can handle this relatively simple
- case here.
-
- For now, this function asserts this and saves the entry
- to flush it after scanning the rest of the PEL list.
-
- If there are ever more entries that either need to be
- flushed last and/or flushed collectively, this whole routine
- will need to be reworked to handle all additional cases. As
- it is the simple case of a single pinned entry needing
- flushed last and collectively is just a minor addition to
- this routine, but signficantly buffing up the usage of
- flush_me_last will require a more
- intense rework of this function and potentially the function
- of candidate lists as a whole. */
-
- entries_to_flush_or_clear_last++;
- entries_to_flush_collectively++;
- HDassert(entries_to_flush_or_clear_last == 1);
- HDassert(entries_to_flush_collectively == 1);
-
- /* Delay the entry. It will be flushed later. */
- delayed_ptr = entry_ptr;
- entries_delayed++;
- HDassert(entries_delayed == 1);
-
- } /* end if */
-
- /* Else, this process needs to clear this entry. */
- else if (entry_ptr->clear_on_unprotect) {
- HDassert(!entry_ptr->flush_immediately);
- entry_ptr->clear_on_unprotect = FALSE;
- clear_ptr = entry_ptr;
- entry_ptr = entry_ptr->next;
- entries_cleared++;
-
-#if ( H5C_APPLY_CANDIDATE_LIST__DEBUG > 1 )
- HDfprintf(stdout, "%s:%d: clearing 0x%llx.\n", FUNC, mpi_rank,
- (long long)clear_ptr->addr);
-#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
-
- if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
- } /* end else-if */
-
- /* Else, if this process needs to independently flush this entry. */
- else if (entry_ptr->flush_immediately) {
- entry_ptr->flush_immediately = FALSE;
- flush_ptr = entry_ptr;
- entry_ptr = entry_ptr->next;
- entries_flushed++;
-
-#if ( H5C_APPLY_CANDIDATE_LIST__DEBUG > 1 )
- HDfprintf(stdout, "%s:%d: flushing 0x%llx.\n", FUNC, mpi_rank,
- (long long)flush_ptr->addr);
-#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
-
- /* Add this entry to the list of entries to collectively write */
- if(H5C__flush_single_entry(f, dxpl_id, flush_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
- } /* end else-if */
- } /* end if */
-
- /* Otherwise, this entry is not marked for flush or clear. Grab the next. */
- else {
- entry_ptr = entry_ptr->next;
- } /* end else */
-
- } /* end while */
-
-#if H5C_APPLY_CANDIDATE_LIST__DEBUG
- HDfprintf(stdout,
- "%s:%d: pel entries examined/cleared/flushed = %u/%u/%u.\n",
- FUNC, mpi_rank, entries_examined,
- entries_cleared, entries_flushed);
- HDfprintf(stdout, "%s:%d: done.\n", FUNC, mpi_rank);
-
- HDfsync(stdout);
-#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
-
- /* ====================================================================== *
- * Now, handle all delayed entries. *
- * *
- * This can *only* be the superblock at this time, so it's relatively *
- * easy to deal with. We're collectively flushing the entry saved from *
- * above. This will need to be handled differently if there are ever more *
- * than one entry needing this special treatment.) *
- * ====================================================================== */
-
- if (delayed_ptr) {
-
- if (delayed_ptr->clear_on_unprotect) {
- if(H5C__flush_single_entry(f, dxpl_id, delayed_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
-
- entry_ptr->clear_on_unprotect = FALSE;
- entries_cleared++;
- } else if (delayed_ptr->flush_immediately) {
- /* Add this entry to the list of entries to collectively write */
- if(H5C__flush_single_entry(f, dxpl_id, delayed_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry collectively.")
-
- entry_ptr->flush_immediately = FALSE;
- entries_flushed++;
- } /* end if */
-
- entries_flushed_collectively++;
- entries_flushed_or_cleared_last++;
- } /* end if */
+ if(H5C__flush_candidate_entries(f, dxpl_id, entries_to_flush, entries_to_clear) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush candidates failed")
/* If we've deferred writing to do it collectively, take care of that now */
if(f->coll_md_write) {
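For context, a minimal standalone C sketch of the per-ring candidate processing that replaces the removed LRU/PEL scan above: candidates are tallied by ring as they are marked, and the flush pass then visits the rings from outermost to innermost. The ring names and structs below are simplified stand-ins, not the actual H5C_ring_t values.

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified ring enumeration; the real H5C_ring_t has more rings */
    enum ring { RING_UNDEFINED, RING_USER, RING_SB, RING_NTYPES };

    struct candidate { enum ring ring; int flush; /* 1 = flush, 0 = clear */ };

    int main(void)
    {
        struct candidate candidates[] = {
            { RING_USER, 1 }, { RING_SB, 0 }, { RING_USER, 0 }, { RING_SB, 1 }
        };
        unsigned entries_to_flush[RING_NTYPES] = { 0 };
        unsigned entries_to_clear[RING_NTYPES] = { 0 };
        size_t   n = sizeof(candidates) / sizeof(candidates[0]);

        /* Pass 1: mark candidates and tally them per ring */
        for (size_t u = 0; u < n; u++) {
            if (candidates[u].flush)
                entries_to_flush[candidates[u].ring]++;
            else
                entries_to_clear[candidates[u].ring]++;
        }

        /* Pass 2: process the rings from outermost to innermost */
        for (int ring = RING_USER; ring < RING_NTYPES; ring++)
            printf("ring %d: flush %u, clear %u\n",
                   ring, entries_to_flush[ring], entries_to_clear[ring]);

        return 0;
    }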
@@ -733,21 +402,6 @@ H5C_apply_candidate_list(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_WRITEERROR, FAIL, "can't write metadata collectively")
} /* end if */
- /* ====================================================================== *
- * Finished flushing everything. *
- * ====================================================================== */
-
- HDassert((entries_flushed == entries_to_flush));
- HDassert((entries_cleared == entries_to_clear));
- HDassert((entries_flushed_or_cleared_last == entries_to_flush_or_clear_last));
- HDassert((entries_flushed_collectively == entries_to_flush_collectively));
-
- if((entries_flushed != entries_to_flush) ||
- (entries_cleared != entries_to_clear) ||
- (entries_flushed_or_cleared_last != entries_to_flush_or_clear_last) ||
- (entries_flushed_collectively != entries_to_flush_collectively))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry count mismatch")
-
done:
if(candidate_assignment_table != NULL)
candidate_assignment_table = (int *)H5MM_xfree((void *)candidate_assignment_table);
@@ -998,11 +652,13 @@ H5C_mark_entries_as_clean(H5F_t * f,
{
H5C_t * cache_ptr;
unsigned entries_cleared;
+ unsigned pinned_entries_cleared;
+ hbool_t progress;
unsigned entries_examined;
unsigned initial_list_len;
haddr_t addr;
-#if H5C_DO_SANITY_CHECKS
unsigned pinned_entries_marked = 0;
+#if H5C_DO_SANITY_CHECKS
unsigned protected_entries_marked = 0;
unsigned other_entries_marked = 0;
haddr_t last_addr;
@@ -1086,19 +742,13 @@ H5C_mark_entries_as_clean(H5F_t * f,
} /* end if */
entry_ptr->clear_on_unprotect = TRUE;
+ if(entry_ptr->is_pinned)
+ pinned_entries_marked++;
#if H5C_DO_SANITY_CHECKS
- if ( entry_ptr->is_protected ) {
-
+ else if(entry_ptr->is_protected)
protected_entries_marked++;
-
- } else if ( entry_ptr->is_pinned ) {
-
- pinned_entries_marked++;
-
- } else {
-
+ else
other_entries_marked++;
- }
#endif /* H5C_DO_SANITY_CHECKS */
}
}
@@ -1140,7 +790,8 @@ H5C_mark_entries_as_clean(H5F_t * f,
entry_ptr = entry_ptr->prev;
entries_cleared++;
- if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
+ if(H5C__flush_single_entry(f, dxpl_id, clear_ptr,
+ (H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG)) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear entry")
} /* end if */
else
@@ -1155,19 +806,27 @@ H5C_mark_entries_as_clean(H5F_t * f,
/* It is also possible that some of the cleared entries are on the
* pinned list. Must scan that also.
*/
- entry_ptr = cache_ptr->pel_head_ptr;
- while(entry_ptr != NULL) {
- if(entry_ptr->clear_on_unprotect) {
- entry_ptr->clear_on_unprotect = FALSE;
- clear_ptr = entry_ptr;
- entry_ptr = entry_ptr->next;
- entries_cleared++;
+ pinned_entries_cleared = 0;
+ progress = TRUE;
+ while((pinned_entries_cleared < pinned_entries_marked) && progress) {
+ progress = FALSE;
+ entry_ptr = cache_ptr->pel_head_ptr;
+ while(entry_ptr != NULL) {
+ if(entry_ptr->clear_on_unprotect && entry_ptr->flush_dep_ndirty_children == 0) {
+ entry_ptr->clear_on_unprotect = FALSE;
+ clear_ptr = entry_ptr;
+ entry_ptr = entry_ptr->next;
+ entries_cleared++;
+ pinned_entries_cleared++;
+ progress = TRUE;
- if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry")
- } /* end if */
- else
- entry_ptr = entry_ptr->next;
+ if(H5C__flush_single_entry(f, dxpl_id, clear_ptr,
+ (H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear entry")
+ } /* end if */
+ else
+ entry_ptr = entry_ptr->next;
+ } /* end while */
} /* end while */
#if H5C_DO_SANITY_CHECKS
@@ -1297,7 +956,6 @@ H5C__collective_write(H5F_t *f, hid_t dxpl_id)
/* Get number of entries in collective write list */
count = (int)H5SL_count(cache_ptr->coll_write_list);
-
if(count > 0) {
H5FD_mpio_xfer_t xfer_mode = H5FD_MPIO_COLLECTIVE;
H5SL_node_t *node;
@@ -1410,4 +1068,510 @@ done:
FUNC_LEAVE_NOAPI(ret_value);
} /* end H5C__collective_write() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__flush_candidate_entries
+ *
+ * Purpose: Flush or clear (as indicated) the candidate entries that
+ * have been marked in the metadata cache. In so doing,
+ * observe rings and flush dependencies.
+ *
+ * Note that this function presumes that:
+ *
+ * 1) no candidate entries are protected,
+ *
+ * 2) all candidate entries are dirty, and
+ *
+ * 3) if a candidate entry has a dirty flush dependency
+ * child, that child is also a candidate entry.
+ *
+ * The function will fail if any of these preconditions are
+ * not met.
+ *
+ * Candidate entries are marked by setting either the
+ * flush_immediately or the clear_on_unprotect flags in the
+ * cache entry (but not both). Entries marked flush_immediately
+ * will be flushed, those marked clear_on_unprotect will be
+ * cleared.
+ *
+ * Note that this function is a modified version of
+ * H5C_flush_cache() -- any changes there may need to be
+ * reflected here and vise versa.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer
+ * 2/10/17
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__flush_candidate_entries(H5F_t *f, hid_t dxpl_id,
+ unsigned entries_to_flush[H5C_RING_NTYPES],
+ unsigned entries_to_clear[H5C_RING_NTYPES])
+{
+#if H5C_DO_SANITY_CHECKS
+ int i;
+ uint32_t index_len = 0;
+ size_t index_size = (size_t)0;
+ size_t clean_index_size = (size_t)0;
+ size_t dirty_index_size = (size_t)0;
+ size_t slist_size = (size_t)0;
+ uint32_t slist_len = 0;
+#endif /* H5C_DO_SANITY_CHECKS */
+ H5C_ring_t ring;
+ H5C_t * cache_ptr;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_STATIC
+
+ HDassert(f);
+ HDassert(f->shared);
+
+ cache_ptr = f->shared->cache;
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
+
+ HDassert(entries_to_flush[H5C_RING_UNDEFINED] == 0);
+ HDassert(entries_to_clear[H5C_RING_UNDEFINED] == 0);
+
+#if H5C_DO_SANITY_CHECKS
+ HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+
+ for(i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
+ index_len += cache_ptr->index_ring_len[i];
+ index_size += cache_ptr->index_ring_size[i];
+ clean_index_size += cache_ptr->clean_index_ring_size[i];
+ dirty_index_size += cache_ptr->dirty_index_ring_size[i];
+
+ slist_len += cache_ptr->slist_ring_len[i];
+ slist_size += cache_ptr->slist_ring_size[i];
+ } /* end for */
+
+ HDassert(cache_ptr->index_len == index_len);
+ HDassert(cache_ptr->index_size == index_size);
+ HDassert(cache_ptr->clean_index_size == clean_index_size);
+ HDassert(cache_ptr->dirty_index_size == dirty_index_size);
+ HDassert(cache_ptr->slist_len == slist_len);
+ HDassert(cache_ptr->slist_size == slist_size);
+#endif /* H5C_DO_SANITY_CHECKS */
+
+#if H5C_DO_EXTREME_SANITY_CHECKS
+ if(H5C_validate_protected_entry_list(cache_ptr) < 0
+ || H5C_validate_pinned_entry_list(cache_ptr) < 0
+ || H5C_validate_lru_list(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+ cache_ptr->flush_in_progress = TRUE;
+
+ /* flush each ring, starting from the outermost ring and
+ * working inward.
+ */
+ ring = H5C_RING_USER;
+ while(ring < H5C_RING_NTYPES) {
+ if(H5C__flush_candidates_in_ring(f, dxpl_id, ring, entries_to_flush[ring], entries_to_clear[ring]) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush candidates in ring failed")
+
+ ring++;
+ } /* end while */
+
+done:
+ cache_ptr->flush_in_progress = FALSE;
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__flush_candidate_entries() */
+
+
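The loop in H5C__flush_candidate_entries() above works through the rings from the outermost (user) ring in to the superblock ring, handing each ring's flush/clear counts to H5C__flush_candidates_in_ring(). A minimal sketch of that ordering follows; the ring values and the flush helper are simplified stand-ins, not the HDF5 internals:

    #include <stdio.h>

    /* Stand-in ring values; the real H5C_ring_t values live in H5Cprivate.h */
    enum ring { RING_UNDEFINED, RING_USER, RING_RDFSM, RING_MDFSM, RING_SBE, RING_SB, RING_NTYPES };

    /* Stand-in for H5C__flush_candidates_in_ring() */
    static int flush_ring(int ring, unsigned to_flush, unsigned to_clear)
    {
        printf("ring %d: flush %u entries, clear %u entries\n", ring, to_flush, to_clear);
        return 0;
    }

    int main(void)
    {
        unsigned entries_to_flush[RING_NTYPES] = {0, 3, 1, 0, 2, 1};
        unsigned entries_to_clear[RING_NTYPES] = {0, 2, 0, 0, 0, 0};
        int ring;

        /* Outermost (user) ring first, superblock ring last */
        for(ring = RING_USER; ring < RING_NTYPES; ring++)
            if(flush_ring(ring, entries_to_flush[ring], entries_to_clear[ring]) < 0)
                return 1;

        return 0;
    }
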
+/*-------------------------------------------------------------------------
+ * Function: H5C__flush_candidates_in_ring
+ *
+ * Purpose: Flush or clear (as indicated) the candidate entries
+ * contained in the specified cache and ring. All candidate
+ * entries in rings outside the specified ring must have been
+ * flushed (or cleared) on entry.
+ *
+ * Note that this function presumes that:
+ *
+ * 1) no candidate entries are protected,
+ *
+ * 2) all candidate entries are dirty, and
+ *
+ * 3) if a candidate entry has a dirty flush dependency
+ * child, that child is also a candidate entry.
+ *
+ * The function will fail if any of these preconditions are
+ * not met.
+ *
+ * Candidate entries are marked by setting either the
+ * flush_immediately or the clear_on_unprotect flags in the
+ * cache entry (but not both). Entries marked flush_immediately
+ * will be flushed, those marked clear_on_unprotect will be
+ * cleared.
+ *
+ * Candidate entries residing in the LRU must be flushed
+ * (or cleared) in LRU order to avoid performance issues.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer
+ * 2/10/17
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__flush_candidates_in_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring,
+ unsigned entries_to_flush, unsigned entries_to_clear)
+{
+ H5C_t * cache_ptr;
+ hbool_t progress;
+ hbool_t restart_scan = FALSE;
+ unsigned entries_flushed = 0;
+ unsigned entries_cleared = 0;
+#if H5C_DO_SANITY_CHECKS
+ unsigned init_index_len;
+#endif /* H5C_DO_SANITY_CHECKS */
+ unsigned clear_flags = H5C__FLUSH_CLEAR_ONLY_FLAG |
+ H5C__GENERATE_IMAGE_FLAG;
+ unsigned flush_flags = H5C__NO_FLAGS_SET;
+ unsigned op_flags;
+ H5C_cache_entry_t *op_ptr;
+ H5C_cache_entry_t *entry_ptr;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
+ HDassert(ring > H5C_RING_UNDEFINED);
+ HDassert(ring < H5C_RING_NTYPES);
+
+#if H5C_DO_EXTREME_SANITY_CHECKS
+ if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_lru_list(cache_ptr) < 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+#if H5C_DO_SANITY_CHECKS
+ /* index len should not change */
+ init_index_len = cache_ptr->index_len;
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ /* Examine entries in the LRU list, and flush or clear all entries
+ * so marked in the target ring.
+ *
+ * With the current implementation of flush dependencies, no entry
+ * in the LRU can have flush dependency children -- thus one pass
+ * through the LRU will be sufficient.
+ *
+ * It is possible that this will change -- hence the assertion.
+ */
+ restart_scan = FALSE;
+ entry_ptr = cache_ptr->LRU_tail_ptr;
+ while(((entries_flushed < entries_to_flush) || (entries_cleared < entries_to_clear))
+ && (entry_ptr != NULL)) {
+ hbool_t prev_is_dirty = FALSE;
+ H5C_cache_entry_t *next_ptr;
+
+ /* Entries in the LRU must not have flush dependency children */
+ HDassert(entry_ptr->flush_dep_nchildren == 0);
+
+ /* Remember dirty state of entry to advance to */
+ if(entry_ptr->prev != NULL)
+ prev_is_dirty = entry_ptr->prev->is_dirty;
+
+ /* If the entry is in the ring */
+ if(entry_ptr->ring == ring) {
+ /* If this process needs to clear this entry. */
+ if(entry_ptr->clear_on_unprotect) {
+ HDassert(entry_ptr->is_dirty);
+
+ /* Set entry and flags for operation */
+ op_ptr = entry_ptr;
+ op_flags = clear_flags;
+
+ /* Set next entry appropriately */
+ next_ptr = entry_ptr->next;
+
+ /* Reset entry flag */
+ entry_ptr->clear_on_unprotect = FALSE;
+ entries_cleared++;
+ } /* end if */
+ else if(entry_ptr->flush_immediately) {
+ HDassert(entry_ptr->is_dirty);
+
+ /* Set entry and flags for operation */
+ op_ptr = entry_ptr;
+ op_flags = flush_flags;
+
+ /* Set next entry appropriately */
+ next_ptr = entry_ptr->next;
+
+ /* Reset entry flag */
+ entry_ptr->flush_immediately = FALSE;
+ entries_flushed++;
+ } /* end else-if */
+ else {
+ /* No operation for this entry */
+ op_ptr = NULL;
+
+ /* Set next entry appropriately */
+ next_ptr = entry_ptr;
+ } /* end else */
+
+ /* Advance to next entry */
+ entry_ptr = entry_ptr->prev;
+
+ /* Check for operation */
+ if(op_ptr) {
+ /* reset entries_removed_counter and
+ * last_entry_removed_ptr prior to the call to
+ * H5C__flush_single_entry() so that we can spot
+ * unexpected removals of entries from the cache,
+ * and set the restart_scan flag if proceeding
+ * would be likely to cause us to scan an entry
+ * that is no longer in the cache.
+ *
+ * Note that as of this writing, this
+ * case cannot occur in the parallel case.
+ *
+ * Note also that there is no test code to verify
+ * that this code actually works (although similar code
+ * in the serial version exists and is tested).
+ */
+ cache_ptr->entries_removed_counter = 0;
+ cache_ptr->last_entry_removed_ptr = NULL;
+
+ if(H5C__flush_single_entry(f, dxpl_id, op_ptr, op_flags) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't flush entry")
+
+ if(cache_ptr->entries_removed_counter != 0
+ || cache_ptr->last_entry_removed_ptr != NULL)
+ restart_scan = TRUE;
+ } /* end if */
+ } /* end if */
+ else {
+ /* Remember "next" pointer (after advancing entries) */
+ next_ptr = entry_ptr;
+
+ /* Advance to next entry */
+ entry_ptr = entry_ptr->prev;
+ } /* end else */
+
+ /* Check for restarts, etc. */
+ if((entry_ptr != NULL) &&
+ (restart_scan || (entry_ptr->is_dirty != prev_is_dirty)
+ || (entry_ptr->next != next_ptr) || entry_ptr->is_protected
+ || entry_ptr->is_pinned)) {
+
+ /* Something has happened to the LRU -- start over
+ * from the tail.
+ *
+ * Recall that this code should be un-reachable at present,
+ * as all the operations by entries on flush that could cause
+ * it to be reachable are disallowed in the parallel case at
+ * present. Hence the following assertion which should be
+ * removed if the above changes.
+ */
+ HDassert(!restart_scan);
+ HDassert(entry_ptr->is_dirty == prev_is_dirty);
+ HDassert(entry_ptr->next == next_ptr);
+ HDassert(!entry_ptr->is_protected);
+ HDassert(!entry_ptr->is_pinned);
+
+ HDassert(FALSE); /* see comment above */
+
+ restart_scan = FALSE;
+ entry_ptr = cache_ptr->LRU_tail_ptr;
+
+ H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
+ } /* end if */
+ } /* end while */
+
+ /* It is also possible that some of the cleared entries are on the
+ * pinned list. Must scan that also.
+ *
+ * Observe that in the case of the pinned entry list, most of the
+ * entries will have flush dependency children. As entries with
+ * flush dependency children may not be flushed until all of their
+ * children are clean, multiple passes through the pinned entry list
+ * may be required.
+ *
+ * WARNING:
+ *
+ * As we now allow unpinning, and removal of other entries as a side
+ * effect of flushing an entry, it is possible that the next entry
+ * in a PEL scan could either be no longer pinned, or no longer in
+ * the cache by the time we get to it.
+ *
+ * At present, this should not be possible in this case, as we disallow
+ * such operations in the parallel version of the library. However,
+ * this may change, and to that end, I have included code to detect
+ * such changes and cause this function to fail if they are detected.
+ */
+ progress = TRUE;
+ while(progress && ((entries_flushed < entries_to_flush) || (entries_cleared < entries_to_clear))) {
+ progress = FALSE;
+ entry_ptr = cache_ptr->pel_head_ptr;
+ while((entry_ptr != NULL) &&
+ ((entries_flushed < entries_to_flush) || (entries_cleared < entries_to_clear))) {
+ H5C_cache_entry_t *prev_ptr;
+ hbool_t next_is_dirty = FALSE;
+
+ HDassert(entry_ptr->is_pinned);
+
+ /* Remember dirty state of entry to advance to */
+ if(entry_ptr->next != NULL)
+ next_is_dirty = entry_ptr->next->is_dirty;
+
+ if(entry_ptr->ring == ring && entry_ptr->flush_dep_ndirty_children == 0) {
+ if(entry_ptr->clear_on_unprotect) {
+ HDassert(entry_ptr->is_dirty);
+
+ /* Set entry and flags for operation */
+ op_ptr = entry_ptr;
+ op_flags = clear_flags;
+
+ /* Reset entry flag */
+ entry_ptr->clear_on_unprotect = FALSE;
+ entries_cleared++;
+ progress = TRUE;
+ } /* end if */
+ else if(entry_ptr->flush_immediately) {
+ HDassert(entry_ptr->is_dirty);
+
+ /* Set entry and flags for operation */
+ op_ptr = entry_ptr;
+ op_flags = flush_flags;
+
+ /* Reset entry flag */
+ entry_ptr->flush_immediately = FALSE;
+ entries_flushed++;
+ progress = TRUE;
+ } /* end else-if */
+ else
+ /* No operation for this entry */
+ op_ptr = NULL;
+
+ /* Check for operation */
+ if(op_ptr) {
+ /* reset entries_removed_counter and
+ * last_entry_removed_ptr prior to the call to
+ * H5C__flush_single_entry() so that we can spot
+ * unexpected removals of entries from the cache,
+ * and set the restart_scan flag if proceeding
+ * would be likely to cause us to scan an entry
+ * that is no longer in the cache.
+ *
+ * Note that as of this writing, this
+ * case cannot occur in the parallel case.
+ *
+ * Note also that there is no test code to verify
+ * that this code actually works (although similar code
+ * in the serial version exists and is tested).
+ */
+ cache_ptr->entries_removed_counter = 0;
+ cache_ptr->last_entry_removed_ptr = NULL;
+
+ /* Add this entry to the list of entries to collectively write
+ *
+ * This comment is misleading -- the entry will be added to the
+ * collective write list only if said list exists.
+ *
+ * JRM -- 2/9/17
+ */
+ if(H5C__flush_single_entry(f, dxpl_id, op_ptr, op_flags) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't flush entry")
+
+ if(cache_ptr->entries_removed_counter != 0
+ || cache_ptr->last_entry_removed_ptr != NULL)
+ restart_scan = TRUE;
+ } /* end if */
+ } /* end if */
+
+ /* Remember "previous" pointer (after advancing entries) */
+ prev_ptr = entry_ptr;
+
+ /* Advance to next entry */
+ entry_ptr = entry_ptr->next;
+
+ /* Check for restarts, etc. */
+ if((entry_ptr != NULL) &&
+ (restart_scan || (entry_ptr->is_dirty != next_is_dirty)
+ || (entry_ptr->prev != prev_ptr) || entry_ptr->is_protected
+ || !entry_ptr->is_pinned)) {
+ /* Something has happened to the pinned entry list -- start
+ * over from the head.
+ *
+ * Recall that this code should be un-reachable at present,
+ * as all the operations by entries on flush that could cause
+ * it to be reachable are disallowed in the parallel case at
+ * present. Hence the following assertion which should be
+ * removed if the above changes.
+ */
+
+ HDassert(!restart_scan);
+ HDassert(entry_ptr->is_dirty == next_is_dirty);
+ HDassert(entry_ptr->prev == prev_ptr);
+ HDassert(!entry_ptr->is_protected);
+ HDassert(entry_ptr->is_pinned);
+
+ HDassert(FALSE); /* see comment above */
+
+ restart_scan = FALSE;
+
+ entry_ptr = cache_ptr->pel_head_ptr;
+
+ * we don't keep stats for pinned entry list scan
+ * restarts. If this code ever becomes reachable,
+ * define the necessary field, and implement the
+ * following macro:
+ *
+ * H5C__UPDATE_STATS_FOR_PEL_SCAN_RESTART(cache_ptr)
+ */
+ } /* end if */
+ } /* end while ( ( entry_ptr != NULL ) &&
+ * ( ( entries_flushed < entries_to_flush ) ||
+ * ( entries_cleared < entries_to_clear ) ) )
+ */
+ } /* end while ( ( ( entries_flushed < entries_to_flush ) ||
+ * ( entries_cleared < entries_to_clear ) ) &&
+ * ( progress ) )
+ */
+
+#if H5C_DO_SANITY_CHECKS
+ HDassert(init_index_len == cache_ptr->index_len);
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ if(entries_flushed != entries_to_flush || entries_cleared != entries_to_clear) {
+ entry_ptr = cache_ptr->il_head;
+ while(entry_ptr != NULL) {
+ HDassert(!entry_ptr->clear_on_unprotect || (entry_ptr->ring > ring));
+ HDassert(!entry_ptr->flush_immediately || (entry_ptr->ring > ring));
+ entry_ptr = entry_ptr->il_next;
+ } /* end while */
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't flush/clear all entries")
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__flush_candidates_in_ring() */
#endif /* H5_HAVE_PARALLEL */
+
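A key detail in H5C__flush_candidates_in_ring() above is the restart logic: entries_removed_counter and last_entry_removed_ptr are reset before each H5C__flush_single_entry() call so that flush side effects which remove other entries can be detected and the scan restarted from the list tail. A minimal sketch of that pattern, with simplified stand-in types rather than the real H5C_t / H5C_cache_entry_t:

    #include <stddef.h>

    struct entry {
        struct entry *prev, *next;
        int marked;                  /* candidate flag, like flush_immediately */
    };

    struct cache {
        struct entry *lru_head, *lru_tail;
        int entries_removed;         /* like entries_removed_counter */
    };

    /* Stand-in for H5C__flush_single_entry(): unlink the entry, count the removal */
    static int flush_entry(struct cache *c, struct entry *e)
    {
        if(e->prev) e->prev->next = e->next; else c->lru_head = e->next;
        if(e->next) e->next->prev = e->prev; else c->lru_tail = e->prev;
        c->entries_removed++;
        return 0;
    }

    /* Scan from the LRU tail; if a flush removed more than the flushed entry
     * itself, assume the list changed under us and restart from the tail.
     */
    int flush_marked(struct cache *c)
    {
        struct entry *e = c->lru_tail;

        while(e != NULL) {
            struct entry *prev = e->prev;

            if(e->marked) {
                e->marked = 0;
                c->entries_removed = 0;
                if(flush_entry(c, e) < 0)
                    return -1;
                if(c->entries_removed > 1) {
                    e = c->lru_tail;         /* restart the scan */
                    continue;
                }
            }
            e = prev;
        }

        return 0;
    }
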
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index 5b923e9..321f1fb 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -4104,6 +4104,23 @@ typedef struct H5C_tag_info_t {
* of changes to the file driver info superblock extension
* management code needed to support rings.
*
+ * msic_in_progress: As the metadata cache has become re-entrant, and as
+ * the free space manager code has become more tightly
+ * integrated with the metadata cache, it is possible that
+ * a call to H5C_insert_entry() may trigger a call to
+ * H5C_make_space_in_cache(), which, via H5C__flush_single_entry()
+ * and client callbacks, may trigger an infinite regression
+ * of calls to H5C_make_space_in_cache().
+ *
+ * The msic_in_progress boolean flag is used to detect this,
+ * and prevent the infinite regression that would otherwise
+ * occur.
+ *
+ * Note that this issue is not hypothetical -- this field
+ * was added 2/16/17 to address this issue when it was
+ * exposed by modifications to test/fheap.c to cause it to
+ * use paged allocation.
+ *
* resize_ctl: Instance of H5C_auto_size_ctl_t containing configuration
* data for automatic cache resizing.
*
@@ -4500,12 +4517,22 @@ typedef struct H5C_tag_info_t {
* total_entries_skipped_in_msic: Number of clean entries skipped while
* enforcing the min_clean_fraction in H5C__make_space_in_cache().
*
+ * total_dirty_pf_entries_skipped_in_msic: Number of dirty prefetched entries
+ * skipped in H5C__make_space_in_cache(). Note that this can
+ * only occur when a file is opened R/O with a cache image
+ * containing dirty entries.
+ *
* total_entries_scanned_in_msic: Number of clean entries skipped while
* enforcing the min_clean_fraction in H5C__make_space_in_cache().
*
* max_entries_skipped_in_msic: Maximum number of clean entries skipped
* in any one call to H5C__make_space_in_cache().
*
+ * max_dirty_pf_entries_skipped_in_msic: Maximum number of dirty prefetched
+ * entries skipped in any one call to H5C__make_space_in_cache().
+ * Note that this can only occur when the file is opened
+ * R/O with a cache image containing dirty entries.
+ *
* max_entries_scanned_in_msic: Maximum number of entries scanned over
* in any one call to H5C__make_space_in_cache().
*
@@ -4733,6 +4760,7 @@ struct H5C_t {
hbool_t cache_full;
hbool_t size_decreased;
hbool_t resize_in_progress;
+ hbool_t msic_in_progress;
H5C_auto_size_ctl_t resize_ctl;
/* Fields for epoch markers used in automatic cache size adjustment */
@@ -4822,8 +4850,10 @@ struct H5C_t {
/* Fields for tracking 'make space in cache' (msic) operations */
int64_t calls_to_msic;
int64_t total_entries_skipped_in_msic;
+ int64_t total_dirty_pf_entries_skipped_in_msic;
int64_t total_entries_scanned_in_msic;
int32_t max_entries_skipped_in_msic;
+ int32_t max_dirty_pf_entries_skipped_in_msic;
int32_t max_entries_scanned_in_msic;
int64_t entries_scanned_to_make_space;
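The msic_in_progress field described above is a re-entrancy guard: set on entry to the make-space path and checked so that a nested call triggered by a client callback does not recurse without bound. One way such a guard can look, as a sketch with stand-in types (the real H5C__make_space_in_cache() logic is considerably more involved):

    #include <stdbool.h>
    #include <stddef.h>

    struct cache {
        bool   msic_in_progress;    /* guard flag, like H5C_t.msic_in_progress */
        size_t index_size;          /* bytes currently held */
        size_t max_size;            /* configured maximum */
    };

    /* Stand-in for evicting one LRU entry; fails when nothing is left */
    static int evict_one(struct cache *c)
    {
        if(c->index_size == 0)
            return -1;
        c->index_size--;
        return 0;
    }

    int make_space(struct cache *c, size_t space_needed)
    {
        if(c->msic_in_progress)     /* already making space: do not recurse */
            return 0;
        c->msic_in_progress = true;

        while(c->index_size + space_needed > c->max_size)
            if(evict_one(c) < 0) {
                c->msic_in_progress = false;
                return -1;
            }

        c->msic_in_progress = false;
        return 0;
    }
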
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index 28eacf2..3408839 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -1066,9 +1066,9 @@ typedef int H5C_ring_t;
*
* is_read_only: Boolean flag that is only meaningful if is_protected is
* TRUE. In this circumstance, it indicates whether the
- * entry has been protected read only, or read/write.
+ * entry has been protected read-only, or read/write.
*
- * If the entry has been protected read only (i.e. is_protected
+ * If the entry has been protected read-only (i.e. is_protected
* and is_read_only are both TRUE), we allow the entry to be
* protected more than once.
*
@@ -1078,7 +1078,7 @@ typedef int H5C_ring_t;
* the entry is actually unprotected.
*
* ro_ref_count: Integer field used to maintain a count of the number of
- * outstanding read only protects on this entry. This field
+ * outstanding read-only protects on this entry. This field
* must be zero whenever either is_protected or is_read_only
* are TRUE.
*
@@ -1458,13 +1458,13 @@ typedef int H5C_ring_t;
* the load of a cache image block, although other scenarios
* are contemplated for the use of this feature. Note that
* unlike the usual prefetch situation, this means that a
- * pre fetched entry can be dirty, and/or can be a party to
+ * prefetched entry can be dirty, and/or can be a party to
* flush dependency relationship(s). This complicates matters
* somewhat.
*
- * The essential feature of a pre-fetched entry is that it
+ * The essential feature of a prefetched entry is that it
* consists only of a buffer containing the on disk image of
- * the entry. Thus it must be deserialized before it can
+ * the entry. Thus it must be deserialized before it can
* be passed back to the library on a protect call. This
* task is handled by H5C_deserialized_prefetched_entry().
* In essence, this routine calls the deserialize callback
@@ -1475,7 +1475,7 @@ typedef int H5C_ring_t;
*
* Further, if the prefetched entry is a flush dependency parent,
* all its flush dependency children (which must also be
- * pre-fetched entries), must be tranfered to the new cache
+ * prefetched entries), must be transferred to the new cache
* entry returned by the deserailization callback.
*
* Finally, if the prefetched entry is a flush dependency child,
@@ -1511,6 +1511,46 @@ typedef int H5C_ring_t;
*
* This field must be zero if prefetched is FALSE.
*
+ * prefetched_dirty: Boolean field that must be set to FALSE unless the
+ * following conditions hold:
+ *
+ * 1) The file has been opened R/O.
+ *
+ * 2) The entry is either a prefetched entry, or was
+ * re-constructed from a prefetched entry.
+ *
+ * 3) The base prefetched entry was marked dirty.
+ *
+ * This field exists to solve the following problem with
+ * files containing cache images that are opened R/O.
+ *
+ * If the cache image contains a dirty entry, that entry
+ * must be marked clean when it is inserted into the cache
+ * in the read-only case, as otherwise the metadata cache
+ * will attempt to flush it on file close -- which is poor
+ * form in the read-only case.
+ *
+ * However, since the entry is marked clean, it is possible
+ * that the metadata cache will evict it if the size of the
+ * metadata in the file exceeds the size of the metadata cache,
+ * and the application visits much of this data.
+ *
+ * If this happens, and the metadata cache is then asked for
+ * this entry, it will attempt to read it from file, and will
+ * obtain either obsolete or invalid data depending on whether
+ * the entry has ever been written to its assigned location in
+ * the file.
+ *
+ * With this background, the purpose of this field should be
+ * obvious -- when set, it allows the eviction candidate
+ * selection code to skip over the entry, thus avoiding the
+ * issue.
+ *
+ * Since the issue only arises in the R/O case, there is
+ * no possible interaction with SWMR. There are also
+ * potential interactions with Evict On Close -- at present,
+ * we deal with this by disabling EOC in the R/O case.
+ *
* serialization_count: Integer field used to maintain a count of the
* number of times each entry is serialized during cache
* serialization. While no entry should be serialized more than
@@ -1627,6 +1667,7 @@ typedef struct H5C_cache_entry_t {
hbool_t prefetched;
int prefetch_type_id;
int32_t age;
+ hbool_t prefetched_dirty;
#ifndef NDEBUG /* debugging field */
int serialization_count;
@@ -2259,7 +2300,6 @@ H5_DLL herr_t H5C_set_trace_file_ptr(H5C_t *cache_ptr, FILE *trace_file_ptr);
H5_DLL herr_t H5C_stats(H5C_t *cache_ptr, const char *cache_name,
hbool_t display_detailed_stats);
H5_DLL void H5C_stats__reset(H5C_t *cache_ptr);
-H5_DLL herr_t H5C_dump_cache(H5C_t *cache_ptr, const char *cache_name);
H5_DLL herr_t H5C_unpin_entry(void *thing);
H5_DLL herr_t H5C_destroy_flush_dependency(void *parent_thing, void *child_thing);
H5_DLL herr_t H5C_unprotect(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *thing,
@@ -2289,6 +2329,8 @@ H5_DLL herr_t H5C_mark_entries_as_clean(H5F_t *f, hid_t dxpl_id, unsigned ce_arr
#endif /* H5_HAVE_PARALLEL */
#ifndef NDEBUG /* debugging functions */
+H5_DLL herr_t H5C_dump_cache(H5C_t *cache_ptr, const char *cache_name);
+H5_DLL herr_t H5C_dump_cache_LRU(H5C_t *cache_ptr, const char *cache_name);
H5_DLL hbool_t H5C_get_serialization_in_progress(const H5C_t *cache_ptr);
H5_DLL hbool_t H5C_cache_is_clean(const H5C_t *cache_ptr, H5C_ring_t inner_ring);
H5_DLL herr_t H5C_dump_cache_skip_list(H5C_t *cache_ptr, char *calling_fcn);
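As the long comment above explains, prefetched_dirty exists so that eviction never selects an entry that was reconstructed from a dirty cache-image entry in a read-only file. A minimal sketch of an eviction-candidate scan that honors such a flag, using simplified stand-in types:

    #include <stdbool.h>
    #include <stddef.h>

    struct entry {
        struct entry *prev;
        bool is_dirty;
        bool prefetched_dirty;   /* like H5C_cache_entry_t.prefetched_dirty */
    };

    /* Walk the LRU from the tail and return the first safely evictable entry */
    struct entry *pick_eviction_candidate(struct entry *lru_tail)
    {
        struct entry *e;

        for(e = lru_tail; e != NULL; e = e->prev)
            if(!e->is_dirty && !e->prefetched_dirty)
                return e;

        return NULL;    /* nothing safe to evict */
    }
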
diff --git a/src/H5Ctag.c b/src/H5Ctag.c
index 157a838..a9bcca1 100644
--- a/src/H5Ctag.c
+++ b/src/H5Ctag.c
@@ -465,7 +465,7 @@ H5C__evict_tagged_entries_cb(H5C_cache_entry_t *entry, void *_ctx)
entry and we'll loop back around again (as evicting other
entries will hopefully unpin this entry) */
ctx->pinned_entries_need_evicted = TRUE;
- else {
+ else if(!entry->prefetched_dirty) {
/* Evict the Entry */
if(H5C__flush_single_entry(ctx->f, ctx->dxpl_id, entry, H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, H5_ITER_ERROR, "Entry eviction failed.")
diff --git a/src/H5F.c b/src/H5F.c
index 5fd3a7d..431b56f 100644
--- a/src/H5F.c
+++ b/src/H5F.c
@@ -406,7 +406,7 @@ H5Fis_hdf5(const char *name)
HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "no file name specified")
/* call the private is_HDF5 function */
- if((ret_value = H5F_is_hdf5(name, H5AC_ind_read_dxpl_id)) < 0)
+ if((ret_value = H5F__is_hdf5(name, H5AC_ind_read_dxpl_id, H5AC_rawdata_dxpl_id)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_NOTHDF5, FAIL, "unable open file")
done:
@@ -535,7 +535,7 @@ done:
* Modifications:
* Robb Matzke, 1997-07-18
* File struct creation and destruction is through H5F_new() and
- * H5F_dest(). Reading the root symbol table entry is done with
+ * H5F__dest(). Reading the root symbol table entry is done with
* H5G_decode().
*
* Robb Matzke, 1997-09-23
@@ -713,12 +713,12 @@ H5Fflush(hid_t object_id, H5F_scope_t scope)
/* Flush other files, depending on scope */
if(H5F_SCOPE_GLOBAL == scope) {
/* Call the flush routine for mounted file hierarchies */
- if(H5F_flush_mounts(f, H5AC_ind_read_dxpl_id) < 0)
+ if(H5F_flush_mounts(f, H5AC_ind_read_dxpl_id, H5AC_rawdata_dxpl_id) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush mounted file hierarchy")
} /* end if */
else {
/* Call the flush routine, for this file */
- if(H5F_flush(f, H5AC_ind_read_dxpl_id, FALSE) < 0)
+ if(H5F__flush(f, H5AC_ind_read_dxpl_id, H5AC_rawdata_dxpl_id, FALSE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush file's cached information")
} /* end else */
} /* end if */
@@ -773,7 +773,7 @@ H5Fclose(hid_t file_id)
if((nref = H5I_get_ref(file_id, FALSE)) < 0)
HGOTO_ERROR(H5E_ATOM, H5E_CANTGET, FAIL, "can't get ID ref count")
if(nref == 1)
- if(H5F_flush(f, H5AC_ind_read_dxpl_id, FALSE) < 0)
+ if(H5F__flush(f, H5AC_ind_read_dxpl_id, H5AC_rawdata_dxpl_id, FALSE) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")
} /* end if */
@@ -838,7 +838,7 @@ H5Freopen(hid_t file_id)
done:
if(ret_value < 0 && new_file)
- if(H5F_dest(new_file, H5AC_ind_read_dxpl_id, FALSE) < 0)
+ if(H5F__dest(new_file, H5AC_ind_read_dxpl_id, H5AC_rawdata_dxpl_id, FALSE) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, FAIL, "can't close file")
FUNC_LEAVE_API(ret_value)
@@ -1042,7 +1042,7 @@ H5Fget_file_image(hid_t file_id, void *buf_ptr, size_t buf_len)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a file ID")
/* call private get_file_image function */
- if((ret_value = H5F_get_file_image(file, buf_ptr, buf_len, H5AC_ind_read_dxpl_id)) < 0)
+ if((ret_value = H5F_get_file_image(file, buf_ptr, buf_len, H5AC_ind_read_dxpl_id, H5AC_rawdata_dxpl_id)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get file image")
done:
@@ -1619,7 +1619,7 @@ H5Fstart_swmr_write(hid_t file_id)
H5G_name_t *obj_paths=NULL; /* Group hierarchy path */
size_t u; /* Local index variable */
hbool_t setup = FALSE; /* Boolean flag to indicate whether SWMR setting is enabled */
- H5F_io_info_t fio_info; /* I/O info for operation */
+ H5F_io_info2_t fio_info; /* I/O info for operation */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_API(FAIL)
@@ -1650,7 +1650,7 @@ H5Fstart_swmr_write(hid_t file_id)
HGOTO_ERROR(H5E_FILE, H5E_UNSUPPORTED, FAIL, "can't have both SWMR and MDC cache image")
/* Flush data buffers */
- if(H5F_flush(file, H5AC_ind_read_dxpl_id, FALSE) < 0)
+ if(H5F__flush(file, H5AC_ind_read_dxpl_id, H5AC_rawdata_dxpl_id, FALSE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush file's cached information")
/* Get the # of opened named datatypes and attributes */
@@ -1704,7 +1704,9 @@ H5Fstart_swmr_write(hid_t file_id)
/* Set up I/O info for operation */
fio_info.f = file;
- if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id)))
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
/* Flush and reset the accumulator */
@@ -1923,7 +1925,7 @@ H5Fset_latest_format(hid_t file_id, hbool_t latest_format)
latest_flags = H5F_USE_LATEST_FLAGS(f, H5F_LATEST_ALL_FLAGS);
if(latest_format != (H5F_LATEST_ALL_FLAGS == latest_flags)) {
/* Call the flush routine, for this file */
- if(H5F_flush(f, H5AC_ind_read_dxpl_id, FALSE) < 0)
+ if(H5F__flush(f, H5AC_ind_read_dxpl_id, H5AC_rawdata_dxpl_id, FALSE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush file's cached information")
/* Toggle the 'latest format' flag */
diff --git a/src/H5FD.c b/src/H5FD.c
index 660f496..db8c09c 100644
--- a/src/H5FD.c
+++ b/src/H5FD.c
@@ -1530,7 +1530,7 @@ herr_t
H5FDread(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, size_t size,
void *buf/*out*/)
{
- H5P_genplist_t *dxpl; /* DXPL object */
+ H5FD_io_info_t fdio_info; /* File driver I/O object */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_API(FAIL)
@@ -1549,13 +1549,24 @@ H5FDread(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, size_t size
if(!buf)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "null result buffer")
- /* Get the DXPL plist object for DXPL ID */
- if(NULL == (dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ /* Set up the file driver I/O info object */
+ fdio_info.file = file;
+ if(H5FD_MEM_DRAW == type) {
+ if(NULL == (fdio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fdio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ } /* end if */
+ else {
+ if(NULL == (fdio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fdio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ } /* end else */
/* Do the real work */
/* (Note compensating for base address addition in internal routine) */
- if(H5FD_read(file, dxpl, type, addr - file->base_addr, size, buf) < 0)
+ if(H5FD_read(&fdio_info, type, addr - file->base_addr, size, buf) < 0)
HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "file read request failed")
done:
@@ -1584,7 +1595,7 @@ herr_t
H5FDwrite(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, size_t size,
const void *buf)
{
- H5P_genplist_t *dxpl; /* DXPL object */
+ H5FD_io_info_t fdio_info; /* File driver I/O object */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_API(FAIL)
@@ -1602,13 +1613,24 @@ H5FDwrite(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, size_t siz
if(!buf)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "null buffer")
- /* Get the DXPL plist object for DXPL ID */
- if(NULL == (dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ /* Set up the file driver I/O info object */
+ fdio_info.file = file;
+ if(H5FD_MEM_DRAW == type) {
+ if(NULL == (fdio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fdio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ } /* end if */
+ else {
+ if(NULL == (fdio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fdio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ } /* end else */
/* The real work */
/* (Note compensating for base address addition in internal routine) */
- if(H5FD_write(file, dxpl, type, addr - file->base_addr, size, buf) < 0)
+ if(H5FD_write(&fdio_info, type, addr - file->base_addr, size, buf) < 0)
HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "file write request failed")
done:
diff --git a/src/H5FDint.c b/src/H5FDint.c
index 744c3d1..0809ac8 100644
--- a/src/H5FDint.c
+++ b/src/H5FDint.c
@@ -93,12 +93,9 @@
*-------------------------------------------------------------------------
*/
herr_t
-H5FD_locate_signature(H5FD_t *file,
-#ifndef H5_DEBUG_BUILD
-const
-#endif /* H5_DEBUG_BUILD */
-H5P_genplist_t *dxpl, haddr_t *sig_addr)
+H5FD_locate_signature(H5FD_io_info_t *fdio_info, haddr_t *sig_addr)
{
+ H5FD_t *file;
haddr_t addr, eoa, eof;
uint8_t buf[H5F_SIGNATURE_LEN];
unsigned n, maxpow;
@@ -106,6 +103,10 @@ H5P_genplist_t *dxpl, haddr_t *sig_addr)
FUNC_ENTER_NOAPI_NOINIT
+ HDassert(fdio_info);
+ file = fdio_info->file;
+ HDassert(file);
+
/* Find the least N such that 2^N is larger than the file size */
eof = H5FD_get_eof(file, H5FD_MEM_SUPER);
eoa = H5FD_get_eoa(file, H5FD_MEM_SUPER);
@@ -124,7 +125,7 @@ H5P_genplist_t *dxpl, haddr_t *sig_addr)
addr = (8 == n) ? 0 : (haddr_t)1 << n;
if(H5FD_set_eoa(file, H5FD_MEM_SUPER, addr + H5F_SIGNATURE_LEN) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "unable to set EOA value for file signature")
- if(H5FD_read(file, dxpl, H5FD_MEM_SUPER, addr, (size_t)H5F_SIGNATURE_LEN, buf) < 0)
+ if(H5FD_read(fdio_info, H5FD_MEM_SUPER, addr, (size_t)H5F_SIGNATURE_LEN, buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "unable to read file signature")
if(!HDmemcmp(buf, H5F_SIGNATURE, (size_t)H5F_SIGNATURE_LEN))
break;
@@ -162,29 +163,36 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5FD_read(H5FD_t *file,
-#ifndef H5_DEBUG_BUILD
-const
-#endif /* H5_DEBUG_BUILD */
-H5P_genplist_t *dxpl, H5FD_mem_t type, haddr_t addr,
+H5FD_read(H5FD_io_info_t *fdio_info, H5FD_mem_t type, haddr_t addr,
size_t size, void *buf/*out*/)
{
+ H5FD_t *file;
+ H5P_genplist_t *io_dxpl;
haddr_t eoa = HADDR_UNDEF;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
+ HDassert(fdio_info);
+ file = fdio_info->file;
HDassert(file && file->cls);
- HDassert(TRUE == H5P_class_isa(H5P_CLASS(dxpl), H5P_CLS_DATASET_XFER_g));
+ HDassert(TRUE == H5P_class_isa(H5P_CLASS(fdio_info->meta_dxpl), H5P_CLS_DATASET_XFER_g));
+ HDassert(TRUE == H5P_class_isa(H5P_CLASS(fdio_info->raw_dxpl), H5P_CLS_DATASET_XFER_g));
HDassert(buf);
+ /* Set up proper DXPL for I/O */
+ if(H5FD_MEM_DRAW == type)
+ io_dxpl = fdio_info->raw_dxpl;
+ else
+ io_dxpl = fdio_info->meta_dxpl;
+
/* Sanity check the dxpl type against the mem type */
#ifdef H5_DEBUG_BUILD
{
H5FD_dxpl_type_t dxpl_type; /* Property indicating the type of the internal dxpl */
/* get the dxpl type */
- if(H5P_get(dxpl, H5FD_DXPL_TYPE_NAME, &dxpl_type) < 0)
+ if(H5P_get(io_dxpl, H5FD_DXPL_TYPE_NAME, &dxpl_type) < 0)
HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't retrieve dxpl type")
/* we shouldn't be here if the dxpl is labeled with NO I/O */
@@ -219,7 +227,7 @@ H5P_genplist_t *dxpl, H5FD_mem_t type, haddr_t addr,
HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, "addr overflow, addr = %llu, size = %llu, eoa = %llu", (unsigned long long)(addr + file->base_addr), (unsigned long long)size, (unsigned long long)eoa)
/* Dispatch to driver */
- if((file->cls->read)(file, type, H5P_PLIST_ID(dxpl), addr + file->base_addr, size, buf) < 0)
+ if((file->cls->read)(file, type, H5P_PLIST_ID(io_dxpl), addr + file->base_addr, size, buf) < 0)
HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read request failed")
done:
@@ -241,29 +249,36 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5FD_write(H5FD_t *file,
-#ifndef H5_DEBUG_BUILD
-const
-#endif /* H5_DEBUG_BUILD */
-H5P_genplist_t *dxpl, H5FD_mem_t type, haddr_t addr,
+H5FD_write(const H5FD_io_info_t *fdio_info, H5FD_mem_t type, haddr_t addr,
size_t size, const void *buf)
{
+ H5FD_t *file;
+ H5P_genplist_t *io_dxpl;
haddr_t eoa = HADDR_UNDEF;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
+ HDassert(fdio_info);
+ file = fdio_info->file;
HDassert(file && file->cls);
- HDassert(TRUE == H5P_class_isa(H5P_CLASS(dxpl), H5P_CLS_DATASET_XFER_g));
+ HDassert(TRUE == H5P_class_isa(H5P_CLASS(fdio_info->meta_dxpl), H5P_CLS_DATASET_XFER_g));
+ HDassert(TRUE == H5P_class_isa(H5P_CLASS(fdio_info->raw_dxpl), H5P_CLS_DATASET_XFER_g));
HDassert(buf);
+ /* Set up proper DXPL for I/O */
+ if(H5FD_MEM_DRAW == type)
+ io_dxpl = fdio_info->raw_dxpl;
+ else
+ io_dxpl = fdio_info->meta_dxpl;
+
/* Sanity check the dxpl type against the mem type */
#ifdef H5_DEBUG_BUILD
{
H5FD_dxpl_type_t dxpl_type; /* Property indicating the type of the internal dxpl */
/* get the dxpl type */
- if(H5P_get(dxpl, H5FD_DXPL_TYPE_NAME, &dxpl_type) < 0)
+ if(H5P_get(io_dxpl, H5FD_DXPL_TYPE_NAME, &dxpl_type) < 0)
HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't retrieve dxpl type")
/* we shouldn't be here if the dxpl is labeled with NO I/O */
@@ -291,7 +306,7 @@ H5P_genplist_t *dxpl, H5FD_mem_t type, haddr_t addr,
(unsigned long long)(addr+ file->base_addr), (unsigned long long)size, (unsigned long long)eoa)
/* Dispatch to driver */
- if((file->cls->write)(file, type, H5P_PLIST_ID(dxpl), addr + file->base_addr, size, buf) < 0)
+ if((file->cls->write)(file, type, H5P_PLIST_ID(io_dxpl), addr + file->base_addr, size, buf) < 0)
HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write request failed")
done:
diff --git a/src/H5FDprivate.h b/src/H5FDprivate.h
index 45f0187..0e2928d 100644
--- a/src/H5FDprivate.h
+++ b/src/H5FDprivate.h
@@ -115,6 +115,19 @@ typedef enum {
#define H5FD_DXPL_TYPE_NAME "H5P_dxpl_type"
#endif /* H5_DEBUG_BUILD */
+/* I/O Info for an operation */
+typedef struct H5FD_io_info_t {
+ H5FD_t *file; /* File driver object */
+#ifndef H5_DEBUG_BUILD
+ const
+#endif /* H5_DEBUG_BUILD */
+ H5P_genplist_t *meta_dxpl; /* Metadata DXPL object */
+#ifndef H5_DEBUG_BUILD
+ const
+#endif /* H5_DEBUG_BUILD */
+ H5P_genplist_t *raw_dxpl; /* Raw data DXPL object */
+} H5FD_io_info_t;
+
/*****************************/
/* Library Private Variables */
@@ -126,15 +139,10 @@ typedef enum {
/******************************/
/* Forward declarations for prototype arguments */
-struct H5P_genplist_t;
struct H5F_t;
H5_DLL int H5FD_term_interface(void);
-H5_DLL herr_t H5FD_locate_signature(H5FD_t *file,
-#ifndef H5_DEBUG_BUILD
-const
-#endif /* H5_DEBUG_BUILD */
-H5P_genplist_t *dxpl, haddr_t *sig_addr);
+H5_DLL herr_t H5FD_locate_signature(H5FD_io_info_t *fdio_info, haddr_t *sig_addr);
H5_DLL H5FD_class_t *H5FD_get_class(hid_t id);
H5_DLL hsize_t H5FD_sb_size(H5FD_t *file);
H5_DLL herr_t H5FD_sb_encode(H5FD_t *file, char *name/*out*/, uint8_t *buf);
@@ -159,17 +167,9 @@ H5_DLL haddr_t H5FD_get_maxaddr(const H5FD_t *file);
H5_DLL herr_t H5FD_get_feature_flags(const H5FD_t *file, unsigned long *feature_flags);
H5_DLL herr_t H5FD_set_feature_flags(H5FD_t *file, unsigned long feature_flags);
H5_DLL herr_t H5FD_get_fs_type_map(const H5FD_t *file, H5FD_mem_t *type_map);
-H5_DLL herr_t H5FD_read(H5FD_t *file,
-#ifndef H5_DEBUG_BUILD
-const
-#endif /* H5_DEBUG_BUILD */
-H5P_genplist_t *dxpl, H5FD_mem_t type,
+H5_DLL herr_t H5FD_read(H5FD_io_info_t *fdio_info, H5FD_mem_t type,
haddr_t addr, size_t size, void *buf/*out*/);
-H5_DLL herr_t H5FD_write(H5FD_t *file,
-#ifndef H5_DEBUG_BUILD
-const
-#endif /* H5_DEBUG_BUILD */
-H5P_genplist_t *dxpl, H5FD_mem_t type,
+H5_DLL herr_t H5FD_write(const H5FD_io_info_t *fdio_info, H5FD_mem_t type,
haddr_t addr, size_t size, const void *buf);
H5_DLL herr_t H5FD_flush(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
H5_DLL herr_t H5FD_truncate(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
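The new H5FD_io_info_t carries both a metadata DXPL and a raw-data DXPL so that the lower layers can pick the right one from the memory type, as the updated H5FD_read()/H5FD_write() do. A minimal sketch of that selection, with stand-in types in place of H5FD_t, H5P_genplist_t and H5FD_mem_t:

    /* Stand-in types; the real ones are H5FD_t, H5P_genplist_t and H5FD_mem_t */
    typedef enum { MEM_SUPER, MEM_BTREE, MEM_OHDR, MEM_DRAW /* raw data */ } mem_type_t;

    typedef struct {
        const void *file;       /* stand-in for H5FD_t * */
        const void *meta_dxpl;  /* metadata transfer property list */
        const void *raw_dxpl;   /* raw data transfer property list */
    } io_info_t;

    /* Raw data I/O uses the raw DXPL; every other memory type is metadata */
    const void *select_dxpl(const io_info_t *info, mem_type_t type)
    {
        return (MEM_DRAW == type) ? info->raw_dxpl : info->meta_dxpl;
    }
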
diff --git a/src/H5Faccum.c b/src/H5Faccum.c
index 48f9bdd..7f3bb39 100644
--- a/src/H5Faccum.c
+++ b/src/H5Faccum.c
@@ -112,18 +112,25 @@ H5FL_BLK_DEFINE_STATIC(meta_accum);
*-------------------------------------------------------------------------
*/
herr_t
-H5F__accum_read(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t addr,
+H5F__accum_read(const H5F_io_info2_t *fio_info, H5FD_mem_t map_type, haddr_t addr,
size_t size, void *buf/*out*/)
{
+ H5FD_io_info_t fdio_info; /* File driver I/O info */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
HDassert(fio_info);
HDassert(fio_info->f);
- HDassert(fio_info->dxpl);
+ HDassert(fio_info->meta_dxpl);
+ HDassert(fio_info->raw_dxpl);
HDassert(buf);
+ /* Translate to file driver I/O info object */
+ fdio_info.file = fio_info->f->shared->lf;
+ fdio_info.meta_dxpl = fio_info->meta_dxpl;
+ fdio_info.raw_dxpl = fio_info->raw_dxpl;
+
/* Check if this information is in the metadata accumulator */
if((fio_info->f->shared->feature_flags & H5FD_FEAT_ACCUMULATE_METADATA) && map_type != H5FD_MEM_DRAW) {
H5F_meta_accum_t *accum; /* Alias for file's metadata accumulator */
@@ -178,7 +185,7 @@ H5F__accum_read(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t addr
accum->dirty_off += amount_before;
/* Dispatch to driver */
- if(H5FD_read(fio_info->f->shared->lf, fio_info->dxpl, map_type, addr, amount_before, accum->buf) < 0)
+ if(H5FD_read(&fdio_info, map_type, addr, amount_before, accum->buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "driver read request failed")
} /* end if */
else
@@ -192,7 +199,7 @@ H5F__accum_read(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t addr
H5_CHECKED_ASSIGN(amount_after, size_t, ((addr + size) - (accum->loc + accum->size)), hsize_t);
/* Dispatch to driver */
- if(H5FD_read(fio_info->f->shared->lf, fio_info->dxpl, map_type, (accum->loc + accum->size), amount_after, (accum->buf + accum->size + amount_before)) < 0)
+ if(H5FD_read(&fdio_info, map_type, (accum->loc + accum->size), amount_after, (accum->buf + accum->size + amount_before)) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "driver read request failed")
} /* end if */
@@ -206,13 +213,13 @@ H5F__accum_read(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t addr
/* Current read doesn't overlap with metadata accumulator, read it from file */
else {
/* Dispatch to driver */
- if(H5FD_read(fio_info->f->shared->lf, fio_info->dxpl, map_type, addr, size, buf) < 0)
+ if(H5FD_read(&fdio_info, map_type, addr, size, buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "driver read request failed")
} /* end else */
} /* end if */
else {
/* Read the data */
- if(H5FD_read(fio_info->f->shared->lf, fio_info->dxpl, map_type, addr, size, buf) < 0)
+ if(H5FD_read(&fdio_info, map_type, addr, size, buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "driver read request failed")
/* Check for overlap w/dirty accumulator */
@@ -255,7 +262,7 @@ H5F__accum_read(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t addr
} /* end if */
else {
/* Read the data */
- if(H5FD_read(fio_info->f->shared->lf, fio_info->dxpl, map_type, addr, size, buf) < 0)
+ if(H5FD_read(&fdio_info, map_type, addr, size, buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "driver read request failed")
} /* end else */
@@ -278,7 +285,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5F__accum_adjust(H5F_meta_accum_t *accum, const H5F_io_info_t *fio_info,
+H5F__accum_adjust(H5F_meta_accum_t *accum, const H5FD_io_info_t *fdio_info,
H5F_accum_adjust_t adjust, size_t size)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -286,7 +293,7 @@ H5F__accum_adjust(H5F_meta_accum_t *accum, const H5F_io_info_t *fio_info,
FUNC_ENTER_STATIC
HDassert(accum);
- HDassert(fio_info);
+ HDassert(fdio_info);
HDassert(H5F_ACCUM_APPEND == adjust || H5F_ACCUM_PREPEND == adjust);
HDassert(size > 0);
HDassert(size <= H5F_ACCUM_MAX_SIZE);
@@ -344,7 +351,7 @@ H5F__accum_adjust(H5F_meta_accum_t *accum, const H5F_io_info_t *fio_info,
/* Check if the dirty region overlaps the region to eliminate from the accumulator */
if((accum->size - shrink_size) < (accum->dirty_off + accum->dirty_len)) {
/* Write out the dirty region from the metadata accumulator, with dispatch to driver */
- if(H5FD_write(fio_info->f->shared->lf, fio_info->dxpl, H5FD_MEM_DEFAULT, (accum->loc + accum->dirty_off), accum->dirty_len, (accum->buf + accum->dirty_off)) < 0)
+ if(H5FD_write(fdio_info, H5FD_MEM_DEFAULT, (accum->loc + accum->dirty_off), accum->dirty_len, (accum->buf + accum->dirty_off)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "file write failed")
/* Reset accumulator dirty flag */
@@ -355,7 +362,7 @@ H5F__accum_adjust(H5F_meta_accum_t *accum, const H5F_io_info_t *fio_info,
/* Check if the dirty region overlaps the region to eliminate from the accumulator */
if(shrink_size > accum->dirty_off) {
/* Write out the dirty region from the metadata accumulator, with dispatch to driver */
- if(H5FD_write(fio_info->f->shared->lf, fio_info->dxpl, H5FD_MEM_DEFAULT, (accum->loc + accum->dirty_off), accum->dirty_len, (accum->buf + accum->dirty_off)) < 0)
+ if(H5FD_write(fdio_info, H5FD_MEM_DEFAULT, (accum->loc + accum->dirty_off), accum->dirty_len, (accum->buf + accum->dirty_off)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "file write failed")
/* Reset accumulator dirty flag */
@@ -417,9 +424,10 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t addr,
+H5F__accum_write(const H5F_io_info2_t *fio_info, H5FD_mem_t map_type, haddr_t addr,
size_t size, const void *buf)
{
+ H5FD_io_info_t fdio_info; /* File driver I/O info */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -427,9 +435,15 @@ H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t add
HDassert(fio_info);
HDassert(fio_info->f);
HDassert(H5F_INTENT(fio_info->f) & H5F_ACC_RDWR);
- HDassert(fio_info->dxpl);
+ HDassert(fio_info->meta_dxpl);
+ HDassert(fio_info->raw_dxpl);
HDassert(buf);
+ /* Translate to file driver I/O info object */
+ fdio_info.file = fio_info->f->shared->lf;
+ fdio_info.meta_dxpl = fio_info->meta_dxpl;
+ fdio_info.raw_dxpl = fio_info->raw_dxpl;
+
/* Check for accumulating metadata */
if((fio_info->f->shared->feature_flags & H5FD_FEAT_ACCUMULATE_METADATA) && map_type != H5FD_MEM_DRAW) {
H5F_meta_accum_t *accum; /* Alias for file's metadata accumulator */
@@ -446,7 +460,7 @@ H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t add
/* Check if the new metadata adjoins the beginning of the current accumulator */
if((addr + size) == accum->loc) {
/* Check if we need to adjust accumulator size */
- if(H5F__accum_adjust(accum, fio_info, H5F_ACCUM_PREPEND, size) < 0)
+ if(H5F__accum_adjust(accum, &fdio_info, H5F_ACCUM_PREPEND, size) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTRESIZE, FAIL, "can't adjust metadata accumulator")
/* Move the existing metadata to the proper location */
@@ -471,7 +485,7 @@ H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t add
/* Check if the new metadata adjoins the end of the current accumulator */
else if(addr == (accum->loc + accum->size)) {
/* Check if we need to adjust accumulator size */
- if(H5F__accum_adjust(accum, fio_info, H5F_ACCUM_APPEND, size) < 0)
+ if(H5F__accum_adjust(accum, &fdio_info, H5F_ACCUM_APPEND, size) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTRESIZE, FAIL, "can't adjust metadata accumulator")
/* Copy the new metadata to the end */
@@ -531,7 +545,7 @@ H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t add
H5_CHECKED_ASSIGN(add_size, size_t, (accum->loc - addr), hsize_t);
/* Check if we need to adjust accumulator size */
- if(H5F__accum_adjust(accum, fio_info, H5F_ACCUM_PREPEND, add_size) < 0)
+ if(H5F__accum_adjust(accum, &fdio_info, H5F_ACCUM_PREPEND, add_size) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTRESIZE, FAIL, "can't adjust metadata accumulator")
/* Calculate the proper offset of the existing metadata */
@@ -571,7 +585,7 @@ H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t add
H5_CHECKED_ASSIGN(add_size, size_t, (addr + size) - (accum->loc + accum->size), hsize_t);
/* Check if we need to adjust accumulator size */
- if(H5F__accum_adjust(accum, fio_info, H5F_ACCUM_APPEND, add_size) < 0)
+ if(H5F__accum_adjust(accum, &fdio_info, H5F_ACCUM_APPEND, add_size) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTRESIZE, FAIL, "can't adjust metadata accumulator")
/* Compute offset of dirty region (after adjusting accumulator) */
@@ -637,7 +651,7 @@ H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t add
else {
/* Write out the existing metadata accumulator, with dispatch to driver */
if(accum->dirty) {
- if(H5FD_write(fio_info->f->shared->lf, fio_info->dxpl, H5FD_MEM_DEFAULT, accum->loc + accum->dirty_off, accum->dirty_len, accum->buf + accum->dirty_off) < 0)
+ if(H5FD_write(&fdio_info, H5FD_MEM_DEFAULT, accum->loc + accum->dirty_off, accum->dirty_len, accum->buf + accum->dirty_off) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "file write failed")
/* Reset accumulator dirty flag */
@@ -733,7 +747,7 @@ H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t add
HGOTO_ERROR(H5E_IO, H5E_CANTRESET, FAIL, "can't reset accumulator")
/* Write the data */
- if(H5FD_write(fio_info->f->shared->lf, fio_info->dxpl, map_type, addr, size, buf) < 0)
+ if(H5FD_write(&fdio_info, map_type, addr, size, buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "file write failed")
/* Check for overlap w/accumulator */
@@ -818,7 +832,7 @@ H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t add
} /* end if */
else {
/* Write the data */
- if(H5FD_write(fio_info->f->shared->lf, fio_info->dxpl, map_type, addr, size, buf) < 0)
+ if(H5FD_write(&fdio_info, map_type, addr, size, buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "file write failed")
} /* end else */
@@ -842,10 +856,11 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5F__accum_free(const H5F_io_info_t *fio_info, H5FD_mem_t H5_ATTR_UNUSED type, haddr_t addr,
+H5F__accum_free(const H5F_io_info2_t *fio_info, H5FD_mem_t H5_ATTR_UNUSED type, haddr_t addr,
hsize_t size)
{
H5F_meta_accum_t *accum; /* Alias for file's metadata accumulator */
+ H5FD_io_info_t fdio_info; /* File driver I/O info */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -853,11 +868,17 @@ H5F__accum_free(const H5F_io_info_t *fio_info, H5FD_mem_t H5_ATTR_UNUSED type, h
/* check arguments */
HDassert(fio_info);
HDassert(fio_info->f);
- HDassert(fio_info->dxpl);
+ HDassert(fio_info->meta_dxpl);
+ HDassert(fio_info->raw_dxpl);
/* Set up alias for file's metadata accumulator info */
accum = &fio_info->f->shared->accum;
+ /* Translate to file driver I/O info object */
+ fdio_info.file = fio_info->f->shared->lf;
+ fdio_info.meta_dxpl = fio_info->meta_dxpl;
+ fdio_info.raw_dxpl = fio_info->raw_dxpl;
+
/* Adjust the metadata accumulator to remove the freed block, if it overlaps */
if((fio_info->f->shared->feature_flags & H5FD_FEAT_ACCUMULATE_METADATA)
&& H5F_addr_overlap(addr, size, accum->loc, accum->size)) {
@@ -930,7 +951,7 @@ H5F__accum_free(const H5F_io_info_t *fio_info, H5FD_mem_t H5_ATTR_UNUSED type, h
/* Check if block to free is entirely before dirty region */
if(H5F_addr_le(tail_addr, dirty_start)) {
/* Write out the entire dirty region of the accumulator */
- if(H5FD_write(fio_info->f->shared->lf, fio_info->dxpl, H5FD_MEM_DEFAULT, dirty_start, accum->dirty_len, accum->buf + accum->dirty_off) < 0)
+ if(H5FD_write(&fdio_info, H5FD_MEM_DEFAULT, dirty_start, accum->dirty_len, accum->buf + accum->dirty_off) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "file write failed")
} /* end if */
/* Block to free overlaps with some/all of dirty region */
@@ -945,7 +966,7 @@ H5F__accum_free(const H5F_io_info_t *fio_info, H5FD_mem_t H5_ATTR_UNUSED type, h
HDassert(write_size > 0);
/* Write out the unfreed dirty region of the accumulator */
- if(H5FD_write(fio_info->f->shared->lf, fio_info->dxpl, H5FD_MEM_DEFAULT, dirty_start + dirty_delta, write_size, accum->buf + accum->dirty_off + dirty_delta) < 0)
+ if(H5FD_write(&fdio_info, H5FD_MEM_DEFAULT, dirty_start + dirty_delta, write_size, accum->buf + accum->dirty_off + dirty_delta) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "file write failed")
} /* end if */
@@ -965,7 +986,7 @@ H5F__accum_free(const H5F_io_info_t *fio_info, H5FD_mem_t H5_ATTR_UNUSED type, h
HDassert(write_size > 0);
/* Write out the unfreed end of the dirty region of the accumulator */
- if(H5FD_write(fio_info->f->shared->lf, fio_info->dxpl, H5FD_MEM_DEFAULT, dirty_start + dirty_delta, write_size, accum->buf + accum->dirty_off + dirty_delta) < 0)
+ if(H5FD_write(&fdio_info, H5FD_MEM_DEFAULT, dirty_start + dirty_delta, write_size, accum->buf + accum->dirty_off + dirty_delta) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "file write failed")
} /* end if */
@@ -1006,7 +1027,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5F__accum_flush(const H5F_io_info_t *fio_info)
+H5F__accum_flush(const H5F_io_info2_t *fio_info)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -1014,12 +1035,20 @@ H5F__accum_flush(const H5F_io_info_t *fio_info)
HDassert(fio_info);
HDassert(fio_info->f);
- HDassert(fio_info->dxpl);
+ HDassert(fio_info->meta_dxpl);
+ HDassert(fio_info->raw_dxpl);
/* Check if we need to flush out the metadata accumulator */
if((fio_info->f->shared->feature_flags & H5FD_FEAT_ACCUMULATE_METADATA) && fio_info->f->shared->accum.dirty) {
+ H5FD_io_info_t fdio_info; /* File driver I/O info */
+
+ /* Translate to file driver I/O info object */
+ fdio_info.file = fio_info->f->shared->lf;
+ fdio_info.meta_dxpl = fio_info->meta_dxpl;
+ fdio_info.raw_dxpl = fio_info->raw_dxpl;
+
/* Flush the metadata contents */
- if(H5FD_write(fio_info->f->shared->lf, fio_info->dxpl, H5FD_MEM_DEFAULT, fio_info->f->shared->accum.loc + fio_info->f->shared->accum.dirty_off, fio_info->f->shared->accum.dirty_len, fio_info->f->shared->accum.buf + fio_info->f->shared->accum.dirty_off) < 0)
+ if(H5FD_write(&fdio_info, H5FD_MEM_DEFAULT, fio_info->f->shared->accum.loc + fio_info->f->shared->accum.dirty_off, fio_info->f->shared->accum.dirty_len, fio_info->f->shared->accum.buf + fio_info->f->shared->accum.dirty_off) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "file write failed")
/* Reset the dirty flag */
@@ -1045,7 +1074,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5F__accum_reset(const H5F_io_info_t *fio_info, hbool_t flush)
+H5F__accum_reset(const H5F_io_info2_t *fio_info, hbool_t flush)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -1053,7 +1082,6 @@ H5F__accum_reset(const H5F_io_info_t *fio_info, hbool_t flush)
HDassert(fio_info);
HDassert(fio_info->f);
- HDassert(fio_info->dxpl);
/* Flush any dirty data in accumulator, if requested */
if(flush)
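Throughout H5Faccum.c the accumulator is flushed by writing only the dirty sub-range -- dirty_len bytes starting at buf + dirty_off, landing at loc + dirty_off in the file. A minimal sketch of that dirty-region flush, with a stub standing in for the driver write:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct accum {
        uint8_t *buf;        /* accumulated metadata */
        uint64_t loc;        /* file address of buf[0] */
        size_t   size;       /* bytes held in buf */
        size_t   dirty_off;  /* offset of the dirty region within buf */
        size_t   dirty_len;  /* length of the dirty region */
        bool     dirty;
    };

    /* Stand-in for the driver write; just report the range written */
    static int file_write(uint64_t addr, size_t len, const uint8_t *src)
    {
        (void)src;
        printf("write %zu bytes at address %llu\n", len, (unsigned long long)addr);
        return 0;
    }

    /* Flush only the dirty sub-range of the accumulator buffer */
    int accum_flush(struct accum *a)
    {
        if(!a->dirty)
            return 0;

        if(file_write(a->loc + a->dirty_off, a->dirty_len, a->buf + a->dirty_off) < 0)
            return -1;

        a->dirty = false;
        a->dirty_len = 0;
        return 0;
    }
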
diff --git a/src/H5Fint.c b/src/H5Fint.c
index d122357..3c98c8f 100644
--- a/src/H5Fint.c
+++ b/src/H5Fint.c
@@ -76,8 +76,8 @@ typedef struct H5F_olist_t {
static int H5F_get_objects_cb(void *obj_ptr, hid_t obj_id, void *key);
static herr_t H5F_build_actual_name(const H5F_t *f, const H5P_genplist_t *fapl,
const char *name, char ** /*out*/ actual_name);/* Declare a free list to manage the H5F_t struct */
-static herr_t H5F__flush_phase1(H5F_t *f, hid_t dxpl_id);
-static herr_t H5F__flush_phase2(H5F_t *f, hid_t dxpl_id, hbool_t closing);
+static herr_t H5F__flush_phase1(H5F_t *f, hid_t meta_dxpl_id);
+static herr_t H5F__flush_phase2(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id, hbool_t closing);
/*********************/
@@ -495,7 +495,7 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5F_is_hdf5
+ * Function: H5F__is_hdf5
*
* Purpose: Check the file signature to detect an HDF5 file.
*
@@ -509,17 +509,14 @@ done:
*
* Programmer: Unknown
*
- * Modifications:
- * Robb Matzke, 1999-08-02
- * Rewritten to use the virtual file layer.
*-------------------------------------------------------------------------
*/
htri_t
-H5F_is_hdf5(const char *name, hid_t dxpl_id)
+H5F__is_hdf5(const char *name, hid_t meta_dxpl_id, hid_t raw_dxpl_id)
{
H5FD_t *file = NULL; /* Low-level file struct */
+ H5FD_io_info_t fdio_info; /* File driver I/O info */
haddr_t sig_addr; /* Addess of hdf5 file signature */
- H5P_genplist_t *xfer_plist= NULL; /* Dataset transfer property list object */
htri_t ret_value = FAIL; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
@@ -528,12 +525,15 @@ H5F_is_hdf5(const char *name, hid_t dxpl_id)
if(NULL == (file = H5FD_open(name, H5F_ACC_RDONLY, H5P_FILE_ACCESS_DEFAULT, HADDR_UNDEF)))
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "unable to open file")
- /* Get the property list object */
- if(NULL == (xfer_plist = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ /* Set up the file driver info */
+ fdio_info.file = file;
+ if(NULL == (fdio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(meta_dxpl_id)))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADATOM, FAIL, "can't get new property list object")
+ if(NULL == (fdio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(raw_dxpl_id)))
HGOTO_ERROR(H5E_CACHE, H5E_BADATOM, FAIL, "can't get new property list object")
/* The file is an hdf5 file if the hdf5 file signature can be found */
- if(H5FD_locate_signature(file, xfer_plist, &sig_addr) < 0)
+ if(H5FD_locate_signature(&fdio_info, &sig_addr) < 0)
HGOTO_ERROR(H5E_FILE, H5E_NOTHDF5, FAIL, "unable to locate file signature")
ret_value = (HADDR_UNDEF != sig_addr);
@@ -544,7 +544,7 @@ done:
HDONE_ERROR(H5E_IO, H5E_CANTCLOSEFILE, FAIL, "unable to close file")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5F_is_hdf5() */
+} /* end H5F__is_hdf5() */
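Note on the pattern above: the virtual file driver layer now takes a single H5FD_io_info_t that bundles the open driver handle with both the metadata and raw data DXPL objects, rather than a separate file pointer and single DXPL. A minimal sketch of the calling sequence, following the H5F__is_hdf5() hunk above (error macros shown as the library uses them):

    H5FD_io_info_t  fdio_info;      /* File driver I/O info */
    haddr_t         sig_addr;       /* Address of the HDF5 file signature */

    /* Bundle the driver handle with the metadata and raw data DXPL objects */
    fdio_info.file = file;
    if(NULL == (fdio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(meta_dxpl_id)))
        HGOTO_ERROR(H5E_CACHE, H5E_BADATOM, FAIL, "can't get new property list object")
    if(NULL == (fdio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(raw_dxpl_id)))
        HGOTO_ERROR(H5E_CACHE, H5E_BADATOM, FAIL, "can't get new property list object")

    /* Low-level routines such as H5FD_locate_signature() now take the single info object */
    if(H5FD_locate_signature(&fdio_info, &sig_addr) < 0)
        HGOTO_ERROR(H5E_FILE, H5E_NOTHDF5, FAIL, "unable to locate file signature")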
/*-------------------------------------------------------------------------
@@ -796,7 +796,7 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5F_dest
+ * Function: H5F__dest
*
* Purpose: Destroys a file structure. This function flushes the cache
* but doesn't do any other cleanup other than freeing memory
@@ -812,11 +812,11 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
+H5F__dest(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id, hbool_t flush)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/* Sanity check */
HDassert(f);
@@ -824,14 +824,14 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
if(1 == f->shared->nrefs) {
int actype; /* metadata cache type (enum value) */
- H5F_io_info_t fio_info; /* I/O info for operation */
+ H5F_io_info2_t fio_info; /* I/O info for operation */
/* Flush at this point since the file will be closed (phase 1).
* Only try to flush the file if it was opened with write access, and if
* the caller requested a flush.
*/
if((H5F_ACC_RDWR & H5F_INTENT(f)) && flush)
- if(H5F__flush_phase1(f, dxpl_id) < 0)
+ if(H5F__flush_phase1(f, meta_dxpl_id) < 0)
/* Push error, but keep going*/
HDONE_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush cached data (phase 1)")
@@ -839,7 +839,7 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
* This allows the cache to set up for creating a metadata cache
* image if this has been requested.
*/
- if(H5AC_prep_for_file_close(f, dxpl_id) < 0)
+ if(H5AC_prep_for_file_close(f, meta_dxpl_id) < 0)
/* Push error, but keep going */
HDONE_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "metadata cache prep for close failed")
@@ -848,7 +848,7 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
* the caller requested a flush.
*/
if((H5F_ACC_RDWR & H5F_INTENT(f)) && flush)
- if(H5F__flush_phase2(f, dxpl_id, TRUE) < 0)
+ if(H5F__flush_phase2(f, meta_dxpl_id, raw_dxpl_id, TRUE) < 0)
/* Push error, but keep going */
HDONE_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush cached data (phase 2)")
@@ -895,7 +895,7 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
* -- JRM
*/
if(H5F_ACC_RDWR & H5F_INTENT(f)) {
- if(H5MF_close(f, dxpl_id) < 0)
+ if(H5MF_close(f, meta_dxpl_id) < 0)
/* Push error, but keep going*/
HDONE_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "can't release file free space info")
@@ -913,7 +913,7 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
f->shared->sblock->status_flags &= (uint8_t)(~H5F_SUPER_SWMR_WRITE_ACCESS);
/* Mark EOA info dirty in cache, so change will get encoded */
- if(H5F_eoa_dirty(f, dxpl_id) < 0)
+ if(H5F_eoa_dirty(f, meta_dxpl_id) < 0)
/* Push error, but keep going*/
HDONE_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, FAIL, "unable to mark superblock as dirty")
@@ -924,12 +924,12 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
* At most, this should change the superblock or the
* superblock extension messages.
*/
- if(H5MF_free_aggrs(f, dxpl_id) < 0)
+ if(H5MF_free_aggrs(f, meta_dxpl_id) < 0)
/* Push error, but keep going*/
HDONE_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "can't release file space")
/* Truncate the file to the current allocated size */
- if(H5FD_truncate(f->shared->lf, dxpl_id, TRUE) < 0)
+ if(H5FD_truncate(f->shared->lf, meta_dxpl_id, TRUE) < 0)
/* Push error, but keep going*/
HDONE_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "low level truncate failed")
@@ -968,7 +968,7 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
HDONE_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "problems closing file")
/* Shutdown the metadata cache */
- if(H5AC_dest(f, dxpl_id))
+ if(H5AC_dest(f, meta_dxpl_id))
/* Push error, but keep going*/
HDONE_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "problems closing file")
@@ -990,7 +990,9 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
/* Set up I/O info for operation */
fio_info.f = f;
- if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(meta_dxpl_id)))
+ HDONE_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id)))
HDONE_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
/* Destroy other components of the file */
@@ -1130,7 +1132,7 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
*/
H5F_t *
H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
- hid_t dxpl_id)
+ hid_t meta_dxpl_id)
{
H5F_t *file = NULL; /*the success return value */
H5F_file_t *shared = NULL; /*shared part of `file' */
@@ -1139,6 +1141,7 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
H5FD_class_t *drvr; /*file driver class info */
H5P_genplist_t *a_plist; /*file access property list */
H5F_close_degree_t fc_degree; /*file close degree */
+ hid_t raw_dxpl_id = H5AC_rawdata_dxpl_id; /* Raw data dxpl used by library */
hbool_t set_flag = FALSE; /*set the status_flags in the superblock */
hbool_t clear = FALSE; /*clear the status_flags */
hbool_t evict_on_close; /* evict on close value from plist */
@@ -1301,24 +1304,23 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
/* Initialize information about the superblock and allocate space for it */
/* (Writes superblock extension messages, if there are any) */
- if(H5F__super_init(file, dxpl_id) < 0)
+ if(H5F__super_init(file, meta_dxpl_id) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "unable to allocate file superblock")
/* Create and open the root group */
/* (This must be after the space for the superblock is allocated in
* the file, since the superblock must be at offset 0)
*/
- if(H5G_mkroot(file, dxpl_id, TRUE) < 0)
+ if(H5G_mkroot(file, meta_dxpl_id, TRUE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "unable to create/open root group")
} /* end if */
else if (1 == shared->nrefs) {
-
/* Read the superblock if it hasn't been read before. */
- if(H5F__super_read(file, dxpl_id, TRUE) < 0)
+ if(H5F__super_read(file, meta_dxpl_id, raw_dxpl_id, TRUE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_READERROR, NULL, "unable to read superblock")
/* Open the root group */
- if(H5G_mkroot(file, dxpl_id, FALSE) < 0)
+ if(H5G_mkroot(file, meta_dxpl_id, FALSE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to read root group")
} /* end if */
@@ -1360,12 +1362,11 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
*/
if(H5P_get(a_plist, H5F_ACS_EVICT_ON_CLOSE_FLAG_NAME, &evict_on_close) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get evict on close value")
-
- if(shared->nrefs == 1) {
+ if(shared->nrefs == 1)
shared->evict_on_close = evict_on_close;
- } else if(shared->nrefs > 1) {
+ else if(shared->nrefs > 1) {
if(shared->evict_on_close != evict_on_close)
- HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "file evict-on-close value doesn't match")
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "file evict-on-close value doesn't match")
} /* end if */
/* Formulate the absolute path for later search of target file for external links */
@@ -1393,7 +1394,7 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
/* Flush the superblock */
if(H5F_super_dirty(file) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, NULL, "unable to mark superblock as dirty")
- if(H5F_flush_tagged_metadata(file, H5AC__SUPERBLOCK_TAG, H5AC_ind_read_dxpl_id) < 0)
+ if(H5F_flush_tagged_metadata(file, H5AC__SUPERBLOCK_TAG, meta_dxpl_id) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, NULL, "unable to flush superblock")
/* Remove the file lock for SWMR_WRITE */
@@ -1405,7 +1406,6 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
else { /* H5F_ACC_RDONLY: check consistency of status_flags */
/* Skip check of status_flags for file with < superblock version 3 */
if(file->shared->sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_3) {
-
if(H5F_INTENT(file) & H5F_ACC_SWMR_READ) {
if((file->shared->sblock->status_flags & H5F_SUPER_WRITE_ACCESS &&
!(file->shared->sblock->status_flags & H5F_SUPER_SWMR_WRITE_ACCESS))
@@ -1413,12 +1413,10 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
(!(file->shared->sblock->status_flags & H5F_SUPER_WRITE_ACCESS) &&
file->shared->sblock->status_flags & H5F_SUPER_SWMR_WRITE_ACCESS))
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "file is not already open for SWMR writing")
-
} /* end if */
else if((file->shared->sblock->status_flags & H5F_SUPER_WRITE_ACCESS) ||
(file->shared->sblock->status_flags & H5F_SUPER_SWMR_WRITE_ACCESS))
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "file is already open for write (may use <h5clear file> to clear file consistency flags)")
-
} /* version 3 superblock */
} /* end else */
} /* end if set_flag */
@@ -1428,7 +1426,7 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
done:
if((NULL == ret_value) && file)
- if(H5F_dest(file, dxpl_id, FALSE) < 0)
+ if(H5F__dest(file, meta_dxpl_id, raw_dxpl_id, FALSE) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, NULL, "problems closing file")
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5F_open() */
@@ -1448,7 +1446,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5F__flush_phase1(H5F_t *f, hid_t dxpl_id)
+H5F__flush_phase1(H5F_t *f, hid_t meta_dxpl_id)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -1458,7 +1456,7 @@ H5F__flush_phase1(H5F_t *f, hid_t dxpl_id)
HDassert(f);
/* Flush any cached dataset storage raw data */
- if(H5D_flush(f, dxpl_id) < 0)
+ if(H5D_flush(f, meta_dxpl_id) < 0)
/* Push error, but keep going*/
HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush dataset cache")
@@ -1468,7 +1466,7 @@ H5F__flush_phase1(H5F_t *f, hid_t dxpl_id)
/* (needs to happen before cache flush, with superblock write, since the
* 'eoa' value is written in superblock -QAK)
*/
- if(H5MF_free_aggrs(f, dxpl_id) < 0)
+ if(H5MF_free_aggrs(f, meta_dxpl_id) < 0)
/* Push error, but keep going*/
HDONE_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "can't release file space")
@@ -1490,9 +1488,9 @@ H5F__flush_phase1(H5F_t *f, hid_t dxpl_id)
*-------------------------------------------------------------------------
*/
static herr_t
-H5F__flush_phase2(H5F_t *f, hid_t dxpl_id, hbool_t closing)
+H5F__flush_phase2(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id, hbool_t closing)
{
- H5F_io_info_t fio_info; /* I/O info for operation */
+ H5F_io_info2_t fio_info; /* I/O info for operation */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -1501,23 +1499,26 @@ H5F__flush_phase2(H5F_t *f, hid_t dxpl_id, hbool_t closing)
HDassert(f);
/* Flush the entire metadata cache */
- if(H5AC_flush(f, dxpl_id) < 0)
+ if(H5AC_flush(f, meta_dxpl_id) < 0)
/* Push error, but keep going*/
HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush metadata cache")
/* Truncate the file to the current allocated size */
- if(H5FD_truncate(f->shared->lf, dxpl_id, closing) < 0)
+ if(H5FD_truncate(f->shared->lf, meta_dxpl_id, closing) < 0)
/* Push error, but keep going*/
HDONE_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "low level truncate failed")
/* Flush the entire metadata cache again since the EOA could have changed in the truncate call. */
- if(H5AC_flush(f, dxpl_id) < 0)
+ if(H5AC_flush(f, meta_dxpl_id) < 0)
/* Push error, but keep going*/
HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush metadata cache")
/* Set up I/O info for operation */
fio_info.f = f;
- if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(meta_dxpl_id)))
+ /* Push error, but keep going*/
+ HDONE_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(raw_dxpl_id)))
/* Push error, but keep going*/
HDONE_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
@@ -1527,7 +1528,7 @@ H5F__flush_phase2(H5F_t *f, hid_t dxpl_id, hbool_t closing)
HDONE_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush metadata accumulator")
/* Flush file buffers to disk. */
- if(H5FD_flush(f->shared->lf, dxpl_id, closing) < 0)
+ if(H5FD_flush(f->shared->lf, meta_dxpl_id, closing) < 0)
/* Push error, but keep going*/
HDONE_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "low level flush failed")
@@ -1536,7 +1537,7 @@ H5F__flush_phase2(H5F_t *f, hid_t dxpl_id, hbool_t closing)
/*-------------------------------------------------------------------------
- * Function: H5F_flush
+ * Function: H5F__flush
*
* Purpose: Flushes cached data.
*
@@ -1549,28 +1550,27 @@ H5F__flush_phase2(H5F_t *f, hid_t dxpl_id, hbool_t closing)
*-------------------------------------------------------------------------
*/
herr_t
-H5F_flush(H5F_t *f, hid_t dxpl_id, hbool_t closing)
+H5F__flush(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id, hbool_t closing)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_PACKAGE
/* Sanity check arguments */
HDassert(f);
/* First phase of flushing data */
- if(H5F__flush_phase1(f, dxpl_id) < 0)
+ if(H5F__flush_phase1(f, meta_dxpl_id) < 0)
/* Push error, but keep going*/
HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush file data")
/* Second phase of flushing data */
- if(H5F__flush_phase2(f, dxpl_id, closing) < 0)
+ if(H5F__flush_phase2(f, meta_dxpl_id, raw_dxpl_id, closing) < 0)
/* Push error, but keep going*/
HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush file data")
-done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5F_flush() */
+} /* end H5F__flush() */
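The flush path now carries both DXPL IDs: H5F__flush() passes the metadata DXPL to phase 1 and both DXPLs to phase 2, where the accumulator reset needs them. A short usage sketch, matching how internal callers elsewhere in this patch (H5F_flush_mounts_recurse(), H5F_try_close()) invoke the new signatures with the library-default DXPLs:

    /* Flush a file's cached data with the library-default DXPL pair
     * (mirrors the calls made elsewhere in this patch; not a new API)
     */
    if(H5F__flush(f, H5AC_ind_read_dxpl_id, H5AC_rawdata_dxpl_id, FALSE) < 0)
        HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush file's cached information")

    /* ...and on close, destroy the file structure with the same pair */
    if(H5F__dest(f, H5AC_ind_read_dxpl_id, H5AC_rawdata_dxpl_id, TRUE) < 0)
        HGOTO_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, FAIL, "problems closing file")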
/*-------------------------------------------------------------------------
@@ -1795,7 +1795,7 @@ H5F_try_close(H5F_t *f, hbool_t *was_closed /*out*/)
if(H5F_efc_try_close(f) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "can't attempt to close EFC")
- /* Delay flush until the shared file struct is closed, in H5F_dest. If the
+ /* Delay flush until the shared file struct is closed, in H5F__dest. If the
* application called H5Fclose, it would have been flushed in that function
* (unless it will have been flushed in H5F_dest anyways). */
@@ -1804,7 +1804,7 @@ H5F_try_close(H5F_t *f, hbool_t *was_closed /*out*/)
* shared H5F_file_t struct. If the reference count for the H5F_file_t
* struct reaches zero then destroy it also.
*/
- if(H5F_dest(f, H5AC_ind_read_dxpl_id, TRUE) < 0)
+ if(H5F__dest(f, H5AC_ind_read_dxpl_id, H5AC_rawdata_dxpl_id, TRUE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, FAIL, "problems closing file")
/* Since we closed the file, this should be set to TRUE */
@@ -2362,7 +2362,8 @@ H5F_set_store_msg_crt_idx(H5F_t *f, hbool_t flag)
*-------------------------------------------------------------------------
*/
ssize_t
-H5F_get_file_image(H5F_t *file, void *buf_ptr, size_t buf_len, hid_t dxpl_id)
+H5F_get_file_image(H5F_t *file, void *buf_ptr, size_t buf_len, hid_t meta_dxpl_id,
+ hid_t raw_dxpl_id)
{
H5FD_t *fd_ptr; /* file driver */
haddr_t eoa; /* End of file address */
@@ -2429,10 +2430,10 @@ H5F_get_file_image(H5F_t *file, void *buf_ptr, size_t buf_len, hid_t dxpl_id)
/* test to see if a buffer was provided -- if not, we are done */
if(buf_ptr != NULL) {
+ H5FD_io_info_t fdio_info; /* File driver I/O info */
size_t space_needed; /* size of file image */
hsize_t tmp;
size_t tmp_size;
- H5P_genplist_t *xfer_plist= NULL; /* Dataset transfer property list object */
/* Check for buffer too small */
if((haddr_t)buf_len < eoa)
@@ -2440,13 +2441,16 @@ H5F_get_file_image(H5F_t *file, void *buf_ptr, size_t buf_len, hid_t dxpl_id)
space_needed = (size_t)eoa;
- /* Get the property list object */
- if(NULL == (xfer_plist = (H5P_genplist_t *)H5I_object(dxpl_id)))
- HGOTO_ERROR(H5E_CACHE, H5E_BADATOM, FAIL, "can't get new property list object")
+ /* Set up file driver I/O info object */
+ fdio_info.file = fd_ptr;
+ if(NULL == (fdio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(meta_dxpl_id)))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADATOM, FAIL, "can't get property list object")
+ if(NULL == (fdio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(raw_dxpl_id)))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADATOM, FAIL, "can't get property list object")
/* read in the file image */
/* (Note compensation for base address addition in internal routine) */
- if(H5FD_read(fd_ptr, xfer_plist, H5FD_MEM_DEFAULT, 0, space_needed, buf_ptr) < 0)
+ if(H5FD_read(&fdio_info, H5FD_MEM_DEFAULT, 0, space_needed, buf_ptr) < 0)
HGOTO_ERROR(H5E_FILE, H5E_READERROR, FAIL, "file image read request failed")
/* Offset to "status_flags" in the superblock */
diff --git a/src/H5Fio.c b/src/H5Fio.c
index afe1278..d40483f 100644
--- a/src/H5Fio.c
+++ b/src/H5Fio.c
@@ -96,15 +96,11 @@ herr_t
H5F_block_read(const H5F_t *f, H5FD_mem_t type, haddr_t addr, size_t size,
hid_t dxpl_id, void *buf/*out*/)
{
- H5F_io_info_t fio_info; /* I/O info for operation */
+ H5F_io_info2_t fio_info; /* I/O info for operation */
H5FD_mem_t map_type; /* Mapped memory type */
- hid_t my_dxpl_id = dxpl_id; /* transfer property to use for I/O */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
-#ifdef QAK
-HDfprintf(stderr, "%s: read from addr = %a, size = %Zu\n", FUNC, addr, size);
-#endif /* QAK */
HDassert(f);
HDassert(f->shared);
@@ -118,16 +114,20 @@ HDfprintf(stderr, "%s: read from addr = %a, size = %Zu\n", FUNC, addr, size);
/* Treat global heap as raw data */
map_type = (type == H5FD_MEM_GHEAP) ? H5FD_MEM_DRAW : type;
-#ifdef H5_DEBUG_BUILD
- /* GHEAP type is treated as RAW, so update the dxpl type property too */
- if(H5FD_MEM_GHEAP == type)
- my_dxpl_id = H5AC_rawdata_dxpl_id;
-#endif /* H5_DEBUG_BUILD */
-
- /* Set up I/O info for operation */
+ /* Set up the I/O info object */
fio_info.f = f;
- if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(my_dxpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(H5FD_MEM_DRAW == type) {
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ } /* end if */
+ else {
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ } /* end else */
/* Pass through metadata accumulator layer */
if(H5F__accum_read(&fio_info, map_type, addr, size, buf) < 0)
@@ -157,15 +157,11 @@ herr_t
H5F_block_write(const H5F_t *f, H5FD_mem_t type, haddr_t addr, size_t size,
hid_t dxpl_id, const void *buf)
{
- H5F_io_info_t fio_info; /* I/O info for operation */
+ H5F_io_info2_t fio_info; /* I/O info for operation */
H5FD_mem_t map_type; /* Mapped memory type */
- hid_t my_dxpl_id = dxpl_id; /* transfer property to use for I/O */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
-#ifdef QAK
-HDfprintf(stderr, "%s: write to addr = %a, size = %Zu\n", FUNC, addr, size);
-#endif /* QAK */
HDassert(f);
HDassert(f->shared);
@@ -180,16 +176,20 @@ HDfprintf(stderr, "%s: write to addr = %a, size = %Zu\n", FUNC, addr, size);
/* Treat global heap as raw data */
map_type = (type == H5FD_MEM_GHEAP) ? H5FD_MEM_DRAW : type;
-#ifdef H5_DEBUG_BUILD
- /* GHEAP type is treated as RAW, so update the dxpl type property too */
- if(H5FD_MEM_GHEAP == type)
- my_dxpl_id = H5AC_rawdata_dxpl_id;
-#endif /* H5_DEBUG_BUILD */
-
- /* Set up I/O info for operation */
+ /* Set up the I/O info object */
fio_info.f = f;
- if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(my_dxpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(H5FD_MEM_DRAW == type) {
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ } /* end if */
+ else {
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ } /* end else */
/* Pass through metadata accumulator layer */
if(H5F__accum_write(&fio_info, map_type, addr, size, buf) < 0)
@@ -216,7 +216,7 @@ done:
herr_t
H5F_flush_tagged_metadata(H5F_t * f, haddr_t tag, hid_t dxpl_id)
{
- H5F_io_info_t fio_info; /* I/O info for operation */
+ H5F_io_info2_t fio_info; /* I/O info for operation */
herr_t ret_value = SUCCEED;
FUNC_ENTER_NOAPI(FAIL)
@@ -227,10 +227,10 @@ H5F_flush_tagged_metadata(H5F_t * f, haddr_t tag, hid_t dxpl_id)
/* Set up I/O info for operation */
fio_info.f = f;
-
- if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
-
/* Flush and reset the accumulator */
if(H5F__accum_reset(&fio_info, TRUE) < 0)
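Both H5F_block_read() and H5F_block_write() now fill in the H5F_io_info2_t the same way: for raw data (H5FD_MEM_DRAW) accesses the caller's DXPL goes into the raw data slot, otherwise it goes into the metadata slot, and the library-default DXPL fills the remaining slot. The same selection appears again in H5MF_xfree() below. A hypothetical helper, not part of this patch, that captures the shared pattern:

    /* Hypothetical helper (illustration only): route the caller's DXPL to the
     * metadata or raw data slot of an H5F_io_info2_t based on the memory type.
     */
    static herr_t
    H5F__setup_io_info(const H5F_t *f, H5FD_mem_t type, hid_t dxpl_id,
        H5F_io_info2_t *fio_info)
    {
        hid_t meta_id = (H5FD_MEM_DRAW == type) ? H5AC_ind_read_dxpl_id : dxpl_id;
        hid_t raw_id  = (H5FD_MEM_DRAW == type) ? dxpl_id : H5AC_rawdata_dxpl_id;

        fio_info->f = f;
        if(NULL == (fio_info->meta_dxpl = (H5P_genplist_t *)H5I_object(meta_id)))
            return FAIL;
        if(NULL == (fio_info->raw_dxpl = (H5P_genplist_t *)H5I_object(raw_id)))
            return FAIL;

        return SUCCEED;
    }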
diff --git a/src/H5Fmount.c b/src/H5Fmount.c
index e3d4952..859b9d6 100644
--- a/src/H5Fmount.c
+++ b/src/H5Fmount.c
@@ -614,7 +614,7 @@ H5F_mount_count_ids(H5F_t *f, unsigned *nopen_files, unsigned *nopen_objs)
*-------------------------------------------------------------------------
*/
static herr_t
-H5F_flush_mounts_recurse(H5F_t *f, hid_t dxpl_id)
+H5F_flush_mounts_recurse(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id)
{
unsigned nerrors = 0; /* Errors from recursive flushes */
unsigned u; /* Index variable */
@@ -627,11 +627,11 @@ H5F_flush_mounts_recurse(H5F_t *f, hid_t dxpl_id)
/* Flush all child files, not stopping for errors */
for(u = 0; u < f->shared->mtab.nmounts; u++)
- if(H5F_flush_mounts_recurse(f->shared->mtab.child[u].file, dxpl_id) < 0)
+ if(H5F_flush_mounts_recurse(f->shared->mtab.child[u].file, meta_dxpl_id, raw_dxpl_id) < 0)
nerrors++;
/* Call the "real" flush routine, for this file */
- if(H5F_flush(f, dxpl_id, FALSE) < 0)
+ if(H5F__flush(f, meta_dxpl_id, raw_dxpl_id, FALSE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush file's cached information")
/* Check flush errors for children - errors are already on the stack */
@@ -656,7 +656,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5F_flush_mounts(H5F_t *f, hid_t dxpl_id)
+H5F_flush_mounts(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -670,7 +670,7 @@ H5F_flush_mounts(H5F_t *f, hid_t dxpl_id)
f = f->parent;
/* Flush the mounted file hierarchy */
- if(H5F_flush_mounts_recurse(f, dxpl_id) < 0)
+ if(H5F_flush_mounts_recurse(f, meta_dxpl_id, raw_dxpl_id) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush mounted file hierarchy")
done:
diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h
index 93a3978..aaa19d9 100644
--- a/src/H5Fpkg.h
+++ b/src/H5Fpkg.h
@@ -383,11 +383,12 @@ H5FL_EXTERN(H5F_file_t);
/* General routines */
H5F_t *H5F_new(H5F_file_t *shared, unsigned flags, hid_t fcpl_id,
hid_t fapl_id, H5FD_t *lf);
-herr_t H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush);
-H5_DLL herr_t H5F_flush(H5F_t *f, hid_t dxpl_id, hbool_t closing);
-H5_DLL htri_t H5F_is_hdf5(const char *name, hid_t dxpl_id);
+herr_t H5F__dest(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id, hbool_t flush);
+H5_DLL herr_t H5F__flush(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id, hbool_t closing);
+H5_DLL htri_t H5F__is_hdf5(const char *name, hid_t meta_dxpl_id, hid_t raw_dxpl_id);
H5_DLL herr_t H5F_get_objects(const H5F_t *f, unsigned types, size_t max_index, hid_t *obj_id_list, hbool_t app_ref, size_t *obj_id_count_ptr);
-H5_DLL ssize_t H5F_get_file_image(H5F_t *f, void *buf_ptr, size_t buf_len, hid_t dxpl_id);
+H5_DLL ssize_t H5F_get_file_image(H5F_t *f, void *buf_ptr, size_t buf_len,
+ hid_t meta_dxpl_id, hid_t raw_dxpl_id);
H5_DLL herr_t H5F_close(H5F_t *f);
/* File mount related routines */
@@ -397,7 +398,8 @@ H5_DLL herr_t H5F_mount_count_ids(H5F_t *f, unsigned *nopen_files, unsigned *nop
/* Superblock related routines */
H5_DLL herr_t H5F__super_init(H5F_t *f, hid_t dxpl_id);
-H5_DLL herr_t H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read);
+H5_DLL herr_t H5F__super_read(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id,
+ hbool_t initial_read);
H5_DLL herr_t H5F__super_size(H5F_t *f, hid_t dxpl_id, hsize_t *super_size, hsize_t *super_ext_size);
H5_DLL herr_t H5F__super_free(H5F_super_t *sblock);
@@ -410,14 +412,14 @@ H5_DLL herr_t H5F_super_ext_close(H5F_t *f, H5O_loc_t *ext_ptr, hid_t dxpl_id,
hbool_t was_created);
/* Metadata accumulator routines */
-H5_DLL herr_t H5F__accum_read(const H5F_io_info_t *fio_info, H5FD_mem_t type,
+H5_DLL herr_t H5F__accum_read(const H5F_io_info2_t *fio_info, H5FD_mem_t type,
haddr_t addr, size_t size, void *buf);
-H5_DLL herr_t H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t type,
+H5_DLL herr_t H5F__accum_write(const H5F_io_info2_t *fio_info, H5FD_mem_t type,
haddr_t addr, size_t size, const void *buf);
-H5_DLL herr_t H5F__accum_free(const H5F_io_info_t *fio_info, H5FD_mem_t type,
+H5_DLL herr_t H5F__accum_free(const H5F_io_info2_t *fio_info, H5FD_mem_t type,
haddr_t addr, hsize_t size);
-H5_DLL herr_t H5F__accum_flush(const H5F_io_info_t *fio_info);
-H5_DLL herr_t H5F__accum_reset(const H5F_io_info_t *fio_info, hbool_t flush);
+H5_DLL herr_t H5F__accum_flush(const H5F_io_info2_t *fio_info);
+H5_DLL herr_t H5F__accum_reset(const H5F_io_info2_t *fio_info, hbool_t flush);
/* Shared file list related routines */
H5_DLL herr_t H5F_sfile_add(H5F_file_t *shared);
diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h
index 7d288fa..f980347 100644
--- a/src/H5Fprivate.h
+++ b/src/H5Fprivate.h
@@ -628,12 +628,20 @@ typedef struct H5F_object_flush_t {
void *udata; /* User data */
} H5F_object_flush_t;
-/* I/O Info for an operation */
+/* I/O Info for an operation (old) */
typedef struct H5F_io_info_t {
const H5F_t *f; /* File object */
const struct H5P_genplist_t *dxpl; /* DXPL object */
} H5F_io_info_t;
+/* I/O Info for an operation */
+/* (Migrate toward this one, so that both raw data & metadata DXPLs are available) */
+typedef struct H5F_io_info2_t {
+ const H5F_t *f; /* File object */
+ const struct H5P_genplist_t *meta_dxpl; /* Metadata DXPL object */
+ const struct H5P_genplist_t *raw_dxpl; /* Raw data DXPL object */
+} H5F_io_info2_t;
+
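A short usage sketch for the transitional struct, assuming the library-default DXPL IDs; this mirrors the setup used by H5F_flush_tagged_metadata() in the H5Fio.c hunk above:

    H5F_io_info2_t fio_info;        /* I/O info for operation */

    fio_info.f = f;
    if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id)))
        HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
    if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id)))
        HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")

    /* Flush and reset the metadata accumulator through the new struct */
    if(H5F__accum_reset(&fio_info, TRUE) < 0)
        HGOTO_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush metadata accumulator")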
/* Concise info about a block of bytes in a file */
typedef struct H5F_block_t {
haddr_t offset; /* Offset of the block in the file */
@@ -723,7 +731,7 @@ H5_DLL herr_t H5F_get_vfd_handle(const H5F_t *file, hid_t fapl, void **file_hand
H5_DLL hbool_t H5F_is_mount(const H5F_t *file);
H5_DLL hbool_t H5F_has_mount(const H5F_t *file);
H5_DLL herr_t H5F_traverse_mount(struct H5O_loc_t *oloc/*in,out*/);
-H5_DLL herr_t H5F_flush_mounts(H5F_t *f, hid_t dxpl_id);
+H5_DLL herr_t H5F_flush_mounts(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id);
/* Functions that operate on blocks of bytes wrt super block */
H5_DLL herr_t H5F_block_read(const H5F_t *f, H5FD_mem_t type, haddr_t addr,
diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c
index 3b86dae..3050a28 100644
--- a/src/H5Fsuper.c
+++ b/src/H5Fsuper.c
@@ -320,13 +320,14 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
+H5F__super_read(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id, hbool_t initial_read)
{
H5P_genplist_t *dxpl = NULL; /* DXPL object */
H5AC_ring_t ring, orig_ring = H5AC_RING_INV;
H5F_super_t * sblock = NULL; /* Superblock structure */
H5F_superblock_cache_ud_t udata; /* User data for cache callbacks */
H5P_genplist_t *c_plist; /* File creation property list */
+ H5FD_io_info_t fdio_info; /* File driver I/O info */
unsigned sblock_flags = H5AC__NO_FLAGS_SET; /* flags used in superblock unprotect call */
haddr_t super_addr; /* Absolute address of superblock */
haddr_t eof; /* End of file address */
@@ -334,7 +335,7 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
hbool_t skip_eof_check = FALSE; /* Whether to skip checking the EOF value */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_PACKAGE_TAG(dxpl_id, H5AC__SUPERBLOCK_TAG, FAIL)
+ FUNC_ENTER_PACKAGE_TAG(meta_dxpl_id, H5AC__SUPERBLOCK_TAG, FAIL)
/* initialize the drvinfo to NULL -- we will overwrite this if there
* is a driver information block
@@ -342,13 +343,19 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
f->shared->drvinfo = NULL;
/* Get the DXPL plist object for DXPL ID */
- if(NULL == (dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ if(NULL == (dxpl = (H5P_genplist_t *)H5I_object(meta_dxpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
if((H5P_get(dxpl, H5AC_RING_NAME, &orig_ring)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to get property value");
+ /* Set up file driver I/O info */
+ fdio_info.file = f->shared->lf;
+ fdio_info.meta_dxpl = dxpl;
+ if(NULL == (fdio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(raw_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+
/* Find the superblock */
- if(H5FD_locate_signature(f->shared->lf, dxpl, &super_addr) < 0)
+ if(H5FD_locate_signature(&fdio_info, &super_addr) < 0)
HGOTO_ERROR(H5E_FILE, H5E_NOTHDF5, FAIL, "unable to locate file signature")
if(HADDR_UNDEF == super_addr)
HGOTO_ERROR(H5E_FILE, H5E_NOTHDF5, FAIL, "file signature not found")
@@ -396,7 +403,7 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "unable to set property value");
/* Look up the superblock */
- if(NULL == (sblock = (H5F_super_t *)H5AC_protect(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, &udata, rw_flags)))
+ if(NULL == (sblock = (H5F_super_t *)H5AC_protect(f, meta_dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, &udata, rw_flags)))
HGOTO_ERROR(H5E_FILE, H5E_CANTPROTECT, FAIL, "unable to load superblock")
if(H5F_INTENT(f) & H5F_ACC_SWMR_WRITE)
@@ -556,7 +563,7 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "set end of space allocation request failed")
/* Look up the driver info block */
- if(NULL == (drvinfo = (H5O_drvinfo_t *)H5AC_protect(f, dxpl_id, H5AC_DRVRINFO, sblock->driver_addr, &drvrinfo_udata, rw_flags)))
+ if(NULL == (drvinfo = (H5O_drvinfo_t *)H5AC_protect(f, meta_dxpl_id, H5AC_DRVRINFO, sblock->driver_addr, &drvrinfo_udata, rw_flags)))
HGOTO_ERROR(H5E_FILE, H5E_CANTPROTECT, FAIL, "unable to load driver info block")
/* Loading the driver info block is enough to set up the right info */
@@ -571,7 +578,7 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
drvinfo_flags |= H5AC__PIN_ENTRY_FLAG;
/* Release the driver info block */
- if(H5AC_unprotect(f, dxpl_id, H5AC_DRVRINFO, sblock->driver_addr, drvinfo, drvinfo_flags) < 0)
+ if(H5AC_unprotect(f, meta_dxpl_id, H5AC_DRVRINFO, sblock->driver_addr, drvinfo, drvinfo_flags) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTUNPROTECT, FAIL, "unable to release driver info block")
/* save a pointer to the driver information cache entry */
@@ -612,14 +619,14 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENOBJ, FAIL, "unable to open file's superblock extension")
/* Check for the extension having a 'driver info' message */
- if((status = H5O_msg_exists(&ext_loc, H5O_DRVINFO_ID, dxpl_id)) < 0)
+ if((status = H5O_msg_exists(&ext_loc, H5O_DRVINFO_ID, meta_dxpl_id)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_EXISTS, FAIL, "unable to read object header")
if(status) {
/* Check for ignoring the driver info for this file */
if(!udata.ignore_drvrinfo) {
/* Retrieve the 'driver info' structure */
- if(NULL == H5O_msg_read(&ext_loc, H5O_DRVINFO_ID, &drvinfo, dxpl_id))
+ if(NULL == H5O_msg_read(&ext_loc, H5O_DRVINFO_ID, &drvinfo, meta_dxpl_id))
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "driver info message not present")
/* Validate and decode driver information */
@@ -637,15 +644,15 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
} /* end if */
/* Read in the shared OH message information if there is any */
- if(H5SM_get_info(&ext_loc, c_plist, dxpl_id) < 0)
+ if(H5SM_get_info(&ext_loc, c_plist, meta_dxpl_id) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to read SOHM table information")
/* Check for the extension having a 'v1 B-tree "K"' message */
- if((status = H5O_msg_exists(&ext_loc, H5O_BTREEK_ID, dxpl_id)) < 0)
+ if((status = H5O_msg_exists(&ext_loc, H5O_BTREEK_ID, meta_dxpl_id)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_EXISTS, FAIL, "unable to read object header")
if(status) {
/* Retrieve the 'v1 B-tree "K"' structure */
- if(NULL == H5O_msg_read(&ext_loc, H5O_BTREEK_ID, &btreek, dxpl_id))
+ if(NULL == H5O_msg_read(&ext_loc, H5O_BTREEK_ID, &btreek, meta_dxpl_id))
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "v1 B-tree 'K' info message not present")
/* Set non-default v1 B-tree 'K' value info from file */
@@ -661,13 +668,13 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
} /* end if */
/* Check for the extension having a 'free-space manager info' message */
- if((status = H5O_msg_exists(&ext_loc, H5O_FSINFO_ID, dxpl_id)) < 0)
+ if((status = H5O_msg_exists(&ext_loc, H5O_FSINFO_ID, meta_dxpl_id)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_EXISTS, FAIL, "unable to read object header")
if(status) {
H5O_fsinfo_t fsinfo; /* Free-space manager info message from superblock extension */
/* Retrieve the 'free-space manager info' structure */
- if(NULL == H5O_msg_read(&ext_loc, H5O_FSINFO_ID, &fsinfo, dxpl_id))
+ if(NULL == H5O_msg_read(&ext_loc, H5O_FSINFO_ID, &fsinfo, meta_dxpl_id))
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get free-space manager info message")
/* Check for non-default info */
@@ -693,7 +700,7 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
} /* end if */
/* Check for the extension having a 'metadata cache image' message */
- if((status = H5O_msg_exists(&ext_loc, H5O_MDCI_MSG_ID, dxpl_id)) < 0)
+ if((status = H5O_msg_exists(&ext_loc, H5O_MDCI_MSG_ID, meta_dxpl_id)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_EXISTS, FAIL, "unable to read object header")
if(status) {
hbool_t rw = ((rw_flags & H5AC__READ_ONLY_FLAG) == 0);
@@ -712,7 +719,7 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
*/
/* Retrieve the 'metadata cache image message' structure */
- if(NULL == H5O_msg_read(&ext_loc, H5O_MDCI_MSG_ID, &mdci_msg, dxpl_id))
+ if(NULL == H5O_msg_read(&ext_loc, H5O_MDCI_MSG_ID, &mdci_msg, meta_dxpl_id))
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get metadata cache image message")
/* Indicate to the cache that there's an image to load on first protect call */
@@ -721,7 +728,7 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
} /* end if */
/* Close superblock extension */
- if(H5F_super_ext_close(f, &ext_loc, dxpl_id, FALSE) < 0)
+ if(H5F_super_ext_close(f, &ext_loc, meta_dxpl_id, FALSE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTCLOSEOBJ, FAIL, "unable to close file's superblock extension")
} /* end if */
@@ -765,7 +772,7 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
HDassert(f->shared->sblock == NULL);
f->shared->sblock = sblock;
#endif /* JRM */
- if(H5F_super_ext_write_msg(f, dxpl_id, H5O_DRVINFO_ID, &drvinfo, FALSE, H5O_MSG_NO_FLAGS_SET) < 0)
+ if(H5F_super_ext_write_msg(f, meta_dxpl_id, H5O_DRVINFO_ID, &drvinfo, FALSE, H5O_MSG_NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "error in writing message to superblock extension")
#if 1 /* bug fix test code -- tidy this up if all goes well */ /* JRM */
@@ -777,7 +784,7 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
/* Check for eliminating the driver info block */
else if(H5F_HAS_FEATURE(f, H5FD_FEAT_IGNORE_DRVRINFO)) {
/* Remove the driver info message from the superblock extension */
- if(H5F_super_ext_remove_msg(f, dxpl_id, H5O_DRVINFO_ID) < 0)
+ if(H5F_super_ext_remove_msg(f, meta_dxpl_id, H5O_DRVINFO_ID) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "error in removing message from superblock extension")
/* Check if the superblock extension was removed */
@@ -795,7 +802,7 @@ done:
HDONE_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set property value")
/* Release the superblock */
- if(sblock && H5AC_unprotect(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, sblock, sblock_flags) < 0)
+ if(sblock && H5AC_unprotect(f, meta_dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, sblock, sblock_flags) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTUNPROTECT, FAIL, "unable to close superblock")
/* If we have failed, make sure no entries are left in the
@@ -808,7 +815,7 @@ done:
HDONE_ERROR(H5E_FILE, H5E_CANTUNPIN, FAIL, "unable to unpin driver info")
/* Evict the driver info block from the cache */
- if(H5AC_expunge_entry(f, dxpl_id, H5AC_DRVRINFO, sblock->driver_addr, H5AC__NO_FLAGS_SET) < 0)
+ if(H5AC_expunge_entry(f, meta_dxpl_id, H5AC_DRVRINFO, sblock->driver_addr, H5AC__NO_FLAGS_SET) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTEXPUNGE, FAIL, "unable to expunge driver info block")
} /* end if */
@@ -819,7 +826,7 @@ done:
HDONE_ERROR(H5E_FILE, H5E_CANTUNPIN, FAIL, "unable to unpin superblock")
/* Evict the superblock from the cache */
- if(H5AC_expunge_entry(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, H5AC__NO_FLAGS_SET) < 0)
+ if(H5AC_expunge_entry(f, meta_dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, H5AC__NO_FLAGS_SET) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTEXPUNGE, FAIL, "unable to expunge superblock")
} /* end if */
} /* end if */
diff --git a/src/H5MF.c b/src/H5MF.c
index 23f128f..36b9386 100644
--- a/src/H5MF.c
+++ b/src/H5MF.c
@@ -645,7 +645,7 @@ herr_t
H5MF_xfree(H5F_t *f, H5FD_mem_t alloc_type, hid_t dxpl_id, haddr_t addr,
hsize_t size)
{
- H5F_io_info_t fio_info; /* I/O info for operation */
+ H5F_io_info2_t fio_info; /* I/O info for operation */
H5MF_free_section_t *node = NULL; /* Free space section pointer */
H5MF_sect_ud_t udata; /* User data for callback */
H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
@@ -688,8 +688,18 @@ HDfprintf(stderr, "%s: fs_type = %u\n", FUNC, (unsigned)fs_type);
/* Set up I/O info for operation */
fio_info.f = f;
- if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(H5FD_MEM_DRAW == alloc_type) {
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ } /* end if */
+ else {
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ } /* end else */
/* Check if the space to free intersects with the file's metadata accumulator */
if(H5F__accum_free(&fio_info, alloc_type, addr, size) < 0)
diff --git a/src/H5Z.c b/src/H5Z.c
index e7a2186..0be0bbe 100644
--- a/src/H5Z.c
+++ b/src/H5Z.c
@@ -626,7 +626,7 @@ H5Z__flush_file_cb(void *obj_ptr, hid_t H5_ATTR_UNUSED obj_id, void H5_ATTR_UNUS
/* Call the flush routine for mounted file hierarchies. Do a global flush
* if the file is opened for write */
if(H5F_ACC_RDWR & H5F_INTENT((H5F_t *)obj_ptr)) {
- if(H5F_flush_mounts((H5F_t *)obj_ptr, H5AC_ind_read_dxpl_id) < 0)
+ if(H5F_flush_mounts((H5F_t *)obj_ptr, H5AC_ind_read_dxpl_id, H5AC_rawdata_dxpl_id) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_CANTFLUSH, FAIL, "unable to flush file hierarchy")
} /* end if */
diff --git a/test/accum.c b/test/accum.c
index 1fcd051..d134bbc 100644
--- a/test/accum.c
+++ b/test/accum.c
@@ -45,18 +45,18 @@
H5F_t * f = NULL;
/* Function Prototypes */
-unsigned test_write_read(const H5F_io_info_t *fio_info);
-unsigned test_write_read_nonacc_front(const H5F_io_info_t *fio_info);
-unsigned test_write_read_nonacc_end(const H5F_io_info_t *fio_info);
-unsigned test_accum_overlap(const H5F_io_info_t *fio_info);
-unsigned test_accum_overlap_clean(const H5F_io_info_t *fio_info);
-unsigned test_accum_overlap_size(const H5F_io_info_t *fio_info);
-unsigned test_accum_non_overlap_size(const H5F_io_info_t *fio_info);
-unsigned test_accum_adjust(const H5F_io_info_t *fio_info);
-unsigned test_read_after(const H5F_io_info_t *fio_info);
-unsigned test_free(const H5F_io_info_t *fio_info);
-unsigned test_big(const H5F_io_info_t *fio_info);
-unsigned test_random_write(const H5F_io_info_t *fio_info);
+unsigned test_write_read(const H5F_io_info2_t *fio_info);
+unsigned test_write_read_nonacc_front(const H5F_io_info2_t *fio_info);
+unsigned test_write_read_nonacc_end(const H5F_io_info2_t *fio_info);
+unsigned test_accum_overlap(const H5F_io_info2_t *fio_info);
+unsigned test_accum_overlap_clean(const H5F_io_info2_t *fio_info);
+unsigned test_accum_overlap_size(const H5F_io_info2_t *fio_info);
+unsigned test_accum_non_overlap_size(const H5F_io_info2_t *fio_info);
+unsigned test_accum_adjust(const H5F_io_info2_t *fio_info);
+unsigned test_read_after(const H5F_io_info2_t *fio_info);
+unsigned test_free(const H5F_io_info2_t *fio_info);
+unsigned test_big(const H5F_io_info2_t *fio_info);
+unsigned test_random_write(const H5F_io_info2_t *fio_info);
unsigned test_swmr_write_big(hbool_t newest_format);
/* Helper Function Prototypes */
@@ -90,7 +90,7 @@ void accum_printf(void);
int
main(void)
{
- H5F_io_info_t fio_info; /* I/O info for operation */
+ H5F_io_info2_t fio_info; /* I/O info for operation */
unsigned nerrors = 0; /* track errors */
hid_t fid = -1;
@@ -109,7 +109,8 @@ main(void)
/* Set up I/O info for operation */
fio_info.f = f;
- if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id))) FAIL_STACK_ERROR
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id))) FAIL_STACK_ERROR
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id))) FAIL_STACK_ERROR
/* Reset metadata accumulator for the file */
if(accum_reset(&fio_info) < 0) FAIL_STACK_ERROR
@@ -166,7 +167,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_write_read(const H5F_io_info_t *fio_info)
+test_write_read(const H5F_io_info2_t *fio_info)
{
int i = 0;
int *write_buf, *read_buf;
@@ -222,7 +223,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_write_read_nonacc_front(const H5F_io_info_t *fio_info)
+test_write_read_nonacc_front(const H5F_io_info2_t *fio_info)
{
int i = 0;
int *write_buf, *read_buf;
@@ -281,7 +282,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_write_read_nonacc_end(const H5F_io_info_t *fio_info)
+test_write_read_nonacc_end(const H5F_io_info2_t *fio_info)
{
int i = 0;
int *write_buf, *read_buf;
@@ -340,7 +341,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_free(const H5F_io_info_t *fio_info)
+test_free(const H5F_io_info2_t *fio_info)
{
int i = 0;
int32_t *wbuf = NULL;
@@ -527,7 +528,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_accum_overlap(const H5F_io_info_t *fio_info)
+test_accum_overlap(const H5F_io_info2_t *fio_info)
{
int i = 0;
int32_t *wbuf, *rbuf;
@@ -699,7 +700,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_accum_overlap_clean(const H5F_io_info_t *fio_info)
+test_accum_overlap_clean(const H5F_io_info2_t *fio_info)
{
int i = 0;
int32_t *wbuf, *rbuf;
@@ -878,7 +879,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_accum_non_overlap_size(const H5F_io_info_t *fio_info)
+test_accum_non_overlap_size(const H5F_io_info2_t *fio_info)
{
int i = 0;
int32_t *wbuf, *rbuf;
@@ -945,7 +946,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_accum_overlap_size(const H5F_io_info_t *fio_info)
+test_accum_overlap_size(const H5F_io_info2_t *fio_info)
{
int i = 0;
int32_t *wbuf, *rbuf;
@@ -1023,7 +1024,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_accum_adjust(const H5F_io_info_t *fio_info)
+test_accum_adjust(const H5F_io_info2_t *fio_info)
{
int i = 0;
int s = 1048576; /* size of buffer */
@@ -1279,7 +1280,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_read_after(const H5F_io_info_t *fio_info)
+test_read_after(const H5F_io_info2_t *fio_info)
{
int i = 0;
int s = 128; /* size of buffer */
@@ -1358,7 +1359,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_big(const H5F_io_info_t *fio_info)
+test_big(const H5F_io_info2_t *fio_info)
{
uint8_t *wbuf, *wbuf2, *rbuf, *zbuf; /* Buffers for reading & writing, etc */
unsigned u; /* Local index variable */
@@ -1666,7 +1667,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_random_write(const H5F_io_info_t *fio_info)
+test_random_write(const H5F_io_info2_t *fio_info)
{
uint8_t *wbuf, *rbuf; /* Buffers for reading & writing */
unsigned seed = 0; /* Random # seed */
@@ -1820,7 +1821,7 @@ test_swmr_write_big(hbool_t newest_format)
pid_t pid; /* Process ID */
#endif /* H5_HAVE_UNISTD_H */
int status; /* Status returned from child process */
- H5F_io_info_t fio_info; /* I/O info for operation */
+ H5F_io_info2_t fio_info; /* I/O info for operation */
char *new_argv[] = {NULL};
char *driver = NULL; /* VFD string (from env variable) */
@@ -1877,7 +1878,9 @@ test_swmr_write_big(hbool_t newest_format)
/* Set up I/O info for operation */
fio_info.f = rf;
- if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id)))
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id)))
+ FAIL_STACK_ERROR
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id)))
FAIL_STACK_ERROR
/* We'll be writing lots of garbage data, so extend the
diff --git a/test/cache.c b/test/cache.c
index 87b1272..c381776 100644
--- a/test/cache.c
+++ b/test/cache.c
@@ -9825,7 +9825,6 @@ check_flush_cache__flush_op_eviction_test(H5F_t * file_ptr)
}
}
-
if(pass) {
/* Now load a large entry. This should result in the eviction
@@ -10370,14 +10369,28 @@ check_flush_cache__flush_op_eviction_test(H5F_t * file_ptr)
* However, (VET 9)'s serialize function needs to modify (VET, 8),
* which is currently not in cache. Thus it calls H5C_protect(VET, 8)
* to gain access to it. H5C_protect(VET, 8) loads (VET, 8), and
- * then attempts to evict entries to make space for it. While (VET, 9)
- * is still at the bottom of the LRU, it is marked flush in progress
- * and this is skipped. Thus the next entries on the LRU are (MET, 0)
- * thru (MET, 30) and (LET, 0) thru (LET, 10) -- all of which are dirty,
+ * then attempts to evict entries to make space for it.
+ *
+ * However, H5C_make_space_in_cache() now exits without taking
+ * any action on re-entrant calls. Thus H5C_protect(VET, 8) simply
+ * loads the entry into the cache -- resulting in a cache that is
+ * 10 KB oversize. The subsequent unprotect puts (VET, 8) at the
+ * head of the LRU and marks it dirty.
+ *
+ * After (VET, 9) is serialized, it is flushed, and moved to the
+ * head of the LRU.
+ *
+ * At this point, the H5C_make_space_in_cache() call made by
+ * H5C_protect(LET, 11) now has 14 KB of space to make.
+ *
+ * The next entries on the LRU are (MET, 0) thru (MET, 30),
+ * (LET, 0) thru (LET, 10), and (VET, 8) -- all of which are dirty,
* and are therefore flushed and moved to the head of the LRU list.
*
* The next entry on the bottom of the LRU list is (VET, 0), which
- * is clean, and is therefore evicted to make space for (VET, 8).
+ * is clean, and is therefore evicted, leaving H5C_make_space_in_cache()
+ * with 4 KB of space to create.
+ *
* This space is sufficient, so H5C_protect(VET, 8) inserts
* (VET, 8) into the cache's index, marks it as protected, and
* returns to the serialize function for (VET, 9).
@@ -10386,22 +10399,10 @@ check_flush_cache__flush_op_eviction_test(H5F_t * file_ptr)
 * calls H5C_unprotect(VET, 8), which marks (VET, 8) as dirty and
* unprotected, and places it at the head of the LRU.
*
- * The serialize function for (VET, 9) then returns, and (VET, 9) is
- * is written to disk, marked clean, and moved to the head of the LRU.
+ * (VET, 0) is the next item on the LRU -- it is clean and is therefore
+ * evicted -- leaving 6 KB of free space after (LET, 11) is inserted
+ * into the cache.
*
- * At this point, the cache is still full (since (VET, 8) took the
- * space created by the eviction of (VET, 0)). Thus
- * H5C_protect(LET, 11) continues to look for space. While
- * (MET, 0) was the next item on the LRU list when it called the
- * serialize function for (VET, 9), the function notices that the
- * LRU has been modified, and restarts its search for candidates
- * for eviction at the bottom of the LRU.
- *
- * (MET, 0) is now at the bottom of the LRU, and is clean. Thus
- * it is evicted. This makes sufficient space for (LET, 11), so
- * H5C_protect(LET, 11) inserts it into the cache, marks it as
- * protected, and returns.
- *
* H5C_unprotect(LET, 11) marks (LET, 11) as unprotected, and then
* returns as well.
*
@@ -10427,9 +10428,9 @@ check_flush_cache__flush_op_eviction_test(H5F_t * file_ptr)
*
* (VET, 7) N 5 KB N N - -
*
- * (VET, 8) Y 10 KB Y N - -
+ * (VET, 8) Y 10 KB N N - -
*
- * (VET, 9) Y 10 KB N N - -
+ * (VET, 9) N 10 KB N N - -
*
* Start by updating the expected table for the expected changes in
* entry status:
@@ -10448,25 +10449,22 @@ check_flush_cache__flush_op_eviction_test(H5F_t * file_ptr)
expected[0].serialized = TRUE;
expected[0].destroyed = TRUE;
expected[8].in_cache = TRUE;
- expected[8].is_dirty = TRUE;
+ expected[8].is_dirty = FALSE;
expected[8].deserialized = TRUE;
- expected[8].serialized = FALSE;
+ expected[8].serialized = TRUE;
expected[8].destroyed = FALSE;
- expected[9].in_cache = TRUE;
+ expected[9].in_cache = FALSE;
expected[9].is_dirty = FALSE;
expected[9].serialized = TRUE;
- expected[9].destroyed = FALSE;
+ expected[9].destroyed = TRUE;
- expected[10].in_cache = FALSE;
+ expected[10].in_cache = TRUE;
expected[10].is_dirty = FALSE;
expected[10].serialized = TRUE;
- expected[10].destroyed = TRUE;
+ expected[10].destroyed = FALSE;
num_large_entries = 12;
- /* a newly loaded entry is not inserted in the cache until after
- * space has been made for it. Thus (LET, 11) will not be flushed.
- */
for (i = num_variable_entries;
i < num_variable_entries + num_monster_entries + num_large_entries - 1;
i++)
@@ -10484,10 +10482,10 @@ check_flush_cache__flush_op_eviction_test(H5F_t * file_ptr)
/* verify cache size */
if((cache_ptr->index_len != 44) ||
(cache_ptr->index_size != (2 * 1024 * 1024) -
- (2 * VARIABLE_ENTRY_SIZE) -
- (10 * LARGE_ENTRY_SIZE)) ||
- (cache_ptr->index_size != ((2 * VARIABLE_ENTRY_SIZE) +
- (30 * MONSTER_ENTRY_SIZE) +
+ (2 * 1024) -
+ (1 * LARGE_ENTRY_SIZE)) ||
+ (cache_ptr->index_size != ((1 * VARIABLE_ENTRY_SIZE) +
+ (31 * MONSTER_ENTRY_SIZE) +
(12 * LARGE_ENTRY_SIZE)))) {
pass = FALSE;
@@ -10497,15 +10495,27 @@ check_flush_cache__flush_op_eviction_test(H5F_t * file_ptr)
/* verify entry status */
verify_entry_status(cache_ptr,
9,
- (num_variable_entries + num_monster_entries + num_large_entries),
+ (num_variable_entries + num_monster_entries +
+ num_large_entries),
expected);
}
if(pass) {
- /* protect and unprotect VET 8 to move it to the top of the LRU */
+ /* protect and unprotect VET 9 to evict MET 0 */
+ protect_entry(file_ptr, VARIABLE_ENTRY_TYPE, 9);
+ unprotect_entry(file_ptr, VARIABLE_ENTRY_TYPE, 9, H5C__NO_FLAGS_SET);
+
+ /* protect and unprotect VET 8 to dirty it and move it to the
+ * top of the LRU. Since we are dirtying it again, reset its
+ * serialized flag.
+ */
+ base_addr = entries[VARIABLE_ENTRY_TYPE];
+ entry_ptr = &(base_addr[8]);
+ entry_ptr->serialized = FALSE;
+
protect_entry(file_ptr, VARIABLE_ENTRY_TYPE, 8);
- unprotect_entry(file_ptr, VARIABLE_ENTRY_TYPE, 8, H5C__NO_FLAGS_SET);
+ unprotect_entry(file_ptr, VARIABLE_ENTRY_TYPE, 8, H5C__DIRTIED_FLAG);
/* Again, touch all the non VARIABLE_ENTRY_TYPE entries in the
@@ -10517,7 +10527,13 @@ check_flush_cache__flush_op_eviction_test(H5F_t * file_ptr)
/* skip MET 0 in first pass so that we evict VET 9 when we
* reload MET 0
+ *
+ * Since we are reloading MET 0, reset its destroyed flag.
*/
+ base_addr = entries[MONSTER_ENTRY_TYPE];
+ entry_ptr = &(base_addr[0]);
+ entry_ptr->destroyed = FALSE;
+
for (i = 1; i < num_monster_entries; i++)
{
protect_entry(file_ptr, MONSTER_ENTRY_TYPE, i);
@@ -10550,7 +10566,9 @@ check_flush_cache__flush_op_eviction_test(H5F_t * file_ptr)
expected[i].is_dirty = TRUE;
}
- /* update MET 0 to set its in cache flag, and reset the its destroyed flag */
+ /* update MET 0 to set its in cache flag, and reset
+ * its destroyed flag
+ */
expected[10].in_cache = TRUE;
/* pass through non variable entries will flush VET 8, and evict VET 9.
@@ -10719,8 +10737,8 @@ check_flush_cache__flush_op_eviction_test(H5F_t * file_ptr)
if((cache_ptr->insertions[VARIABLE_ENTRY_TYPE] != 0) ||
(cache_ptr->pinned_insertions[VARIABLE_ENTRY_TYPE] != 0) ||
(cache_ptr->clears[VARIABLE_ENTRY_TYPE] != 0) ||
- (cache_ptr->flushes[VARIABLE_ENTRY_TYPE] != 8) ||
- (cache_ptr->evictions[VARIABLE_ENTRY_TYPE] != 11) ||
+ (cache_ptr->flushes[VARIABLE_ENTRY_TYPE] != 9) ||
+ (cache_ptr->evictions[VARIABLE_ENTRY_TYPE] != 12) ||
(cache_ptr->take_ownerships[VARIABLE_ENTRY_TYPE] != 0) ||
(cache_ptr->moves[VARIABLE_ENTRY_TYPE] != 1) ||
(cache_ptr->entry_flush_moves[VARIABLE_ENTRY_TYPE] != 0) ||
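The rewritten expectations in the hunks above follow from the "make space in cache" re-entrancy change brought over from the page_buffering branch: when H5C_make_space_in_cache() is entered recursively (for example, from a serialize callback that protects another entry during eviction), it returns without flushing or evicting anything, so the cache can be temporarily oversize until the outer call resumes. A simplified sketch of that guard, not the library's actual code; the per-cache flag name (msic_in_progress) and body are illustrative only:

    /* Simplified illustration of the re-entrancy guard described above */
    static herr_t
    H5C__make_space_sketch(H5C_t *cache_ptr, size_t space_needed)
    {
        /* Re-entrant call: take no action and let the outer call make space */
        if(cache_ptr->msic_in_progress)
            return SUCCEED;

        cache_ptr->msic_in_progress = TRUE;

        /* ... scan the LRU, flushing dirty entries and evicting clean ones
         *     until at least space_needed bytes are available ...
         */

        cache_ptr->msic_in_progress = FALSE;

        return SUCCEED;
    }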
diff --git a/test/cache_image.c b/test/cache_image.c
index de0507b..0e7928a 100644
--- a/test/cache_image.c
+++ b/test/cache_image.c
@@ -4560,7 +4560,7 @@ cache_image_smoke_check_4(void)
* If sufficient zoos have been created, continue to
* 10). Otherwise goto 5)
*
- * 10) Open the file.
+ * 10) Open the file R/O.
*
* Verify that the file contains a metadata cache
* image superblock extension message.
@@ -4571,14 +4571,23 @@ cache_image_smoke_check_4(void)
*
* 13) Open the file.
*
- * Verify that the file doesn't contain a metadata cache
+ * Verify that the file contains a metadata cache
* image superblock extension message.
*
* 14) Validate all the zoos.
*
* 15) Close the file.
*
- * 16) Delete the file.
+ * 16) Open the file.
+ *
+ * Verify that the file doesn't contain a metadata cache
+ * image superblock extension message.
+ *
+ * 17) Validate all the zoos.
+ *
+ * 18) Close the file.
+ *
+ * 19) Delete the file.
*
* Return: void
*
@@ -4821,9 +4830,56 @@ cache_image_smoke_check_5(void)
} /* end while */
cp += 5;
+ /* 10) Open the file read only.
+ *
+ * Verify that the file contains a metadata cache image
+ * superblock extension message.
+ */
+ if(pass) {
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ TRUE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if(show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 11) Validate all the zoos. */
+ i = min_group;
+ while(pass && i <= max_group) {
+ sprintf(process_group_name, "/process_%d", i);
+ validate_zoo(file_id, process_group_name, i++);
+ }
-
- /* 10) Open the file.
+#if H5C_COLLECT_CACHE_STATS
+ if( pass) {
+ if(cache_ptr->images_loaded == 0) {
+ pass = FALSE;
+ failure_mssg = "metadata cache image block not loaded(2).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if(show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 12) Close the file. */
+ if(pass) {
+ if(H5Fclose(file_id) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ /* 13) Open the file R/W.
*
* Verify that the file contains a metadata cache image
* superblock extension message.
@@ -4847,7 +4903,7 @@ cache_image_smoke_check_5(void)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* 11) Validate all the zoos. */
+ /* 14) Validate all the zoos. */
i = min_group;
while ( ( pass ) && ( i <= max_group ) ) {
@@ -4871,7 +4927,7 @@ cache_image_smoke_check_5(void)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* 12) Close the file. */
+ /* 15) Close the file. */
if ( pass ) {
@@ -4883,7 +4939,7 @@ cache_image_smoke_check_5(void)
}
- /* 13) Open the file.
+ /* 16) Open the file.
*
* Verify that the file doesn't contain a metadata cache image
* superblock extension message.
@@ -4907,7 +4963,7 @@ cache_image_smoke_check_5(void)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* 14) Validate all the zoos.
+ /* 17) Validate all the zoos.
*
* Verify that the metadata cache image superblock
* extension message has been deleted.
@@ -4935,7 +4991,7 @@ cache_image_smoke_check_5(void)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* 15) Close the file. */
+ /* 18) Close the file. */
if ( pass ) {
@@ -4950,7 +5006,7 @@ cache_image_smoke_check_5(void)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* 16) Delete the file */
+ /* 19) Delete the file */
if ( pass ) {
@@ -6444,6 +6500,576 @@ cache_image_api_error_check_3(void)
/*-------------------------------------------------------------------------
+ * Function: cache_image_api_error_check_4()
+ *
+ * Purpose: This test is one of a sequence of tests intended
+ * to verify correct management of API errors.
+ *
+ *		The object of this test is to verify that a request for
+ *		a cache image is handled correctly when a version 2
+ *		superblock is not available or not requested (the cache
+ *		image request should be ignored silently).
+ *
+ * The test is set up as follows:
+ *
+ * 1) Create a FAPL requesting a cache image, but WITHOUT
+ * specifying the latest file format.
+ *
+ *		2) Create an HDF5 file using the above FAPL.
+ *
+ * 3) Create some datasets in the file.
+ *
+ * 4) Close the file.
+ *
+ * 5) Open the file read only. Verify that the file doesn't
+ * contain a cache image.
+ *
+ * 6) Verify that the datasets exist and contain the
+ *		   expected data.
+ *
+ * Verify that the cache image was not loaded.
+ *
+ * 7) Close the file.
+ *
+ * 8) Open the file R/W using the FAPL defined in 1) above.
+ * Verify that the file does not contain a cache image.
+ *
+ * 9) Close the file.
+ *
+ * 10) Open the file R/W using the FAPL defined in 1) above.
+ * Verify that the file does not contain a cache image.
+ *
+ *	       11) Verify that the datasets contain the expected data.
+ *
+ * Verify that a cache image was not loaded.
+ *
+ *	       12) Create several more datasets.
+ *
+ * 13) Close the file.
+ *
+ * 14) Open the file read write.
+ *
+ * Verify that the file does not contain a cache image.
+ *
+ *	       15) Verify the datasets exist and contain the expected
+ * data.
+ *
+ * Verify that a cache image was not loaded.
+ *
+ * 16) Close the file.
+ *
+ * 17) Delete the file.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/25/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+cache_image_api_error_check_4(void)
+{
+ const char * fcn_name = "cache_image_api_error_check_4()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t fapl_id = -1;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ int cp = 0;
+ H5AC_cache_image_config_t cache_image_config;
+
+ TESTING("metadata cache image api error check 4");
+
+ pass = TRUE;
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* setup the file name */
+ if ( pass ) {
+
+ if ( h5_fixname(FILENAMES[0], H5P_DEFAULT, filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 1) Create a FAPL requesting a cache image, but WITHOUT
+ * specifying the latest file format.
+ */
+ if ( pass ) {
+
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+
+ if ( fapl_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pcreate() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( pass ) {
+
+ /* set cache image config fields to taste */
+ cache_image_config.version = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
+ cache_image_config.generate_image = TRUE;
+ cache_image_config.save_resize_status = FALSE;
+ cache_image_config.entry_ageout = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;
+
+ if ( H5Pset_mdc_image_config(fapl_id, &cache_image_config) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_mdc_image_config() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+    /* 2) Create an HDF5 file using the above FAPL. */
+
+ if ( pass ) {
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+
+ if ( file_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fcreate() failed.\n";
+
+ } else {
+
+ file_ptr = (struct H5F_t *)H5I_object_verify(file_id, H5I_FILE);
+
+ if ( file_ptr == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "Can't get file_ptr.";
+
+ }
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* get a pointer to the file's internal data structure and then
+ * to the cache structure
+ */
+ if ( pass ) {
+
+ if ( file_ptr->shared->cache == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "can't get cache pointer(1).\n";
+
+ } else {
+
+ cache_ptr = file_ptr->shared->cache;
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Create some datasets in the file. */
+
+ if ( pass ) {
+
+ create_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ HDassert(cache_ptr);
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(1).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 4) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 5) Open the file read only. */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ TRUE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 6) Verify that the datasets exist and contain the
+     *    expected data.
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(2).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 7) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 8) Open the file R/W using the FAPL defined in 1) above.
+ *
+ * Verify that the file does not contain a cache image.
+ */
+
+ if ( pass ) {
+
+ file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
+
+ if ( file_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fopen() failed.\n";
+
+ } else {
+
+ file_ptr = (struct H5F_t *)H5I_object_verify(file_id, H5I_FILE);
+
+ if ( file_ptr == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "Can't get file_ptr.";
+
+ }
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* get a pointer to the file's internal data structure and then
+ * to the cache structure
+ */
+ if ( pass ) {
+
+ if ( file_ptr->shared->cache == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "can't get cache pointer(1).\n";
+
+ } else {
+
+ cache_ptr = file_ptr->shared->cache;
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( pass ) {
+
+ if ( ( cache_ptr->load_image == TRUE ) ||
+ ( cache_ptr->delete_image == TRUE ) ) {
+
+ pass = FALSE;
+ failure_mssg = "mdci sb extension message present?\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 9) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+
+ /* 10) Open the file R/W using the FAPL defined in 1) above.
+ * Verify that the file does not contain a cache image.
+ */
+
+ if ( pass ) {
+
+ file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
+
+ if ( file_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fopen() failed.\n";
+
+ } else {
+
+ file_ptr = (struct H5F_t *)H5I_object_verify(file_id, H5I_FILE);
+
+ if ( file_ptr == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "Can't get file_ptr.";
+
+ }
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* get a pointer to the file's internal data structure and then
+ * to the cache structure
+ */
+ if ( pass ) {
+
+ if ( file_ptr->shared->cache == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "can't get cache pointer(1).\n";
+
+ } else {
+
+ cache_ptr = file_ptr->shared->cache;
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( pass ) {
+
+ if ( ( cache_ptr->load_image == TRUE ) ||
+ ( cache_ptr->delete_image == TRUE ) ) {
+
+ pass = FALSE;
+ failure_mssg = "mdci sb extension message present?\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+    /* 11) Verify that the datasets contain the expected data.
+ *
+ * Verify that a cache image was not loaded.
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(2).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+    /* 12) Create several more datasets. */
+
+ if ( pass ) {
+
+ create_datasets(file_id, 6, 10);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 13) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 14) Open the file read write.
+ *
+ * Verify that the file does not contain a cache image.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+    /* 15) Verify the datasets exist and contain the expected data.
+ *
+ * Verify that a cache image was not loaded.
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 10);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(2).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 16) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 17) Delete the file */
+
+ if ( pass ) {
+
+ if ( HDremove(filename) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* tidy up */
+ if ( fapl_id != -1 )
+ H5Pclose(fapl_id);
+
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ FUNC, failure_mssg);
+
+ return !pass;
+
+} /* cache_image_api_error_check_4() */
+/*-------------------------------------------------------------------------
* Function: main
*
* Purpose: Run tests on the cache code contained in H5C.c
@@ -6489,6 +7115,7 @@ main(void)
nerrs += cache_image_api_error_check_1();
nerrs += cache_image_api_error_check_2();
nerrs += cache_image_api_error_check_3();
+ nerrs += cache_image_api_error_check_4();
return(nerrs > 0);
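Editor's note (not part of the patch): the new cache_image_api_error_check_4() test exercises the public path of requesting a metadata cache image on a file access property list without also enabling the latest file format. The minimal standalone sketch below shows that usage with the same public API and constants the test itself calls; the file name, return codes, and omitted I/O are placeholders. Because no H5Pset_libver_bounds() call is made, the file gets no version 2 superblock and the image request is expected to be ignored silently, which is exactly the behavior the test verifies.

/* Minimal sketch: request a metadata cache image without the latest
 * file format.  Placeholder names; error handling kept trivial.
 */
#include "hdf5.h"

int
main(void)
{
    H5AC_cache_image_config_t config;
    hid_t                     fapl_id;
    hid_t                     file_id;

    if((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
        return 1;

    config.version            = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
    config.generate_image     = 1;    /* hbool_t: request an image      */
    config.save_resize_status = 0;
    config.entry_ageout       = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;

    if(H5Pset_mdc_image_config(fapl_id, &config) < 0)
        return 1;

    /* No H5Pset_libver_bounds() call, so no version 2 superblock is
     * created and no cache image will actually be written on close.
     */
    if((file_id = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT,
                            fapl_id)) < 0)
        return 1;

    /* ... create datasets, do I/O ... */

    H5Fclose(file_id);
    H5Pclose(fapl_id);

    return 0;
}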
diff --git a/test/cache_tagging.c b/test/cache_tagging.c
index 02ce19b..8901468 100644
--- a/test/cache_tagging.c
+++ b/test/cache_tagging.c
@@ -53,13 +53,16 @@
/* ===================== */
/* Helper Functions */
+#ifndef NDEBUG
static int dump_cache(hid_t fid);
+#endif /* NDEBUG */ /* end debugging functions */
static int verify_no_unknown_tags(hid_t fid);
static int mark_all_entries_investigated(hid_t fid);
static int reset_all_entries_investigated(hid_t fid);
static int verify_tag(hid_t fid, int id, haddr_t tag);
static int get_object_header_tag(hid_t loc_id, haddr_t *tag);
static int get_sbe_tag(hid_t fid, haddr_t *tag);
+
/* Tests */
static unsigned check_file_creation_tags(hid_t fcpl_id, int type);
static unsigned check_file_open_tags(hid_t fcpl, int type);
@@ -71,7 +74,6 @@ static unsigned check_attribute_rename_tags(hid_t fcpl, int type);
static unsigned check_dataset_creation_tags(hid_t fcpl, int type);
static unsigned check_dataset_creation_earlyalloc_tags(hid_t fcpl, int type);
static unsigned check_link_removal_tags(hid_t fcpl, int type);
-
static unsigned check_group_creation_tags(void);
static unsigned check_multi_group_creation_tags(void);
static unsigned check_group_open_tags(void);
@@ -95,6 +97,7 @@ static unsigned check_invalid_tag_application(void);
/* ================ */
+#ifndef NDEBUG
/*-------------------------------------------------------------------------
* Function: dump_cache()
@@ -128,6 +131,7 @@ static int dump_cache(hid_t fid)
error:
return -1;
} /* dump_cache */
+#endif /* NDEBUG */ /* end debugging functions */
/*-------------------------------------------------------------------------
@@ -445,8 +449,10 @@ check_file_creation_tags(hid_t fcpl_id, int type)
/* Create a test file with provided fcpl_t */
if ( (fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT)) < 0 ) TEST_ERROR;
+#ifndef NDEBUG
/* if verbose, print cache index to screen before verification . */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* verify there is a superblock entry with superblock tag */
if ( verify_tag(fid, H5AC_SUPERBLOCK_ID, H5AC__SUPERBLOCK_TAG) < 0 ) TEST_ERROR;
@@ -551,8 +557,10 @@ check_file_open_tags(hid_t fcpl, int type)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen before verification . */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* verify there is a superblock entry with superblock tag. */
if ( verify_tag(fid, H5AC_SUPERBLOCK_ID, H5AC__SUPERBLOCK_TAG) < 0 ) TEST_ERROR;
@@ -653,8 +661,10 @@ check_group_creation_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group's tagged metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -756,8 +766,10 @@ check_multi_group_creation_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify there is an object header for each group */
for (i = 0; i < MULTIGROUPS; i++) {
@@ -888,8 +900,10 @@ check_link_iteration_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group's tagged metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -1005,8 +1019,10 @@ check_dense_attribute_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify free space header and section info */
if ( verify_tag(fid, H5AC_FSPACE_SINFO_ID, d_tag) < 0 ) TEST_ERROR;
@@ -1061,8 +1077,10 @@ check_dense_attribute_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* verify object header belonging to dataset */
if ( verify_tag(fid, H5AC_OHDR_ID, d_tag) < 0 ) TEST_ERROR;
@@ -1169,8 +1187,10 @@ check_group_open_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -1273,8 +1293,10 @@ check_attribute_creation_tags(hid_t fcpl, int type)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* verify object header belonging to group */
if ( verify_tag(fid, H5AC_OHDR_ID, g_tag) < 0 ) TEST_ERROR;
@@ -1407,8 +1429,10 @@ check_attribute_open_tags(hid_t fcpl, int type)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* verify object header belonging to group */
if ( verify_tag(fid, H5AC_OHDR_ID, g_tag) < 0 ) TEST_ERROR;
@@ -1550,8 +1574,10 @@ check_attribute_rename_tags(hid_t fcpl, int type)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -1712,8 +1738,10 @@ check_attribute_delete_tags(hid_t fcpl, int type)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* verify object header belonging to group */
if ( verify_tag(fid, H5AC_OHDR_ID, g_tag) < 0 ) TEST_ERROR;
@@ -1834,8 +1862,10 @@ check_dataset_creation_tags(hid_t fcpl, int type)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -1959,8 +1989,10 @@ check_dataset_creation_earlyalloc_tags(hid_t fcpl, int type)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -2097,8 +2129,10 @@ check_dataset_open_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -2228,8 +2262,10 @@ check_dataset_write_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify 10 b-tree nodes belonging to dataset */
for (i=0; i<10; i++)
@@ -2351,8 +2387,10 @@ check_attribute_write_tags(hid_t fcpl, int type)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify object header of group */
if ( verify_tag(fid, H5AC_OHDR_ID, g_tag) < 0 ) TEST_ERROR;
@@ -2506,8 +2544,10 @@ check_dataset_read_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify 19 b-tree nodes belonging to dataset */
for (i=0; i<19; i++)
@@ -2636,8 +2676,10 @@ check_dataset_size_retrieval(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify 19 b-tree nodes belonging to dataset */
for (i=0; i<19; i++)
@@ -2769,8 +2811,10 @@ check_dataset_extend_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, d_tag) < 0 ) TEST_ERROR;
@@ -2866,8 +2910,10 @@ check_object_info_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group's tagged metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -2972,8 +3018,10 @@ check_object_copy_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group's tagged metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -3122,8 +3170,10 @@ check_link_removal_tags(hid_t fcpl, int type)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group's tagged metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -3271,8 +3321,10 @@ check_link_getname_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group's tagged metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -3371,8 +3423,10 @@ check_external_link_creation_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -3478,8 +3532,10 @@ check_external_link_open_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* verify tag value of first file's root group */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
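Editor's note (not part of the patch): the cache_tagging.c hunks above all apply one pattern, so a condensed sketch of it is collected here. dump_cache() depends on debug-only cache internals, so both its prototype/definition and every verbose call site are compiled only when NDEBUG is undefined, while the tag verification itself still runs in all build types. The function and check below are illustrative placeholders, not code from the patch.

/* Illustrative only -- condensed form of the guard pattern used above. */
#ifndef NDEBUG
static int dump_cache(hid_t fid);       /* available in debug builds only */
#endif /* NDEBUG */

static unsigned
check_example_tags(hid_t fid)           /* hypothetical tagging check */
{
#ifndef NDEBUG
    /* if verbose, print cache index to screen before verification */
    if(verbose)
        dump_cache(fid);
#endif /* NDEBUG */

    /* tag verification proceeds in both debug and release builds */
    if(verify_tag(fid, H5AC_SUPERBLOCK_ID, H5AC__SUPERBLOCK_TAG) < 0)
        TEST_ERROR;

    return 0;

error:
    return 1;
}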
diff --git a/test/evict_on_close.c b/test/evict_on_close.c
index 3986d5a..b00c1e4 100644
--- a/test/evict_on_close.c
+++ b/test/evict_on_close.c
@@ -40,6 +40,7 @@
#include "H5Ipkg.h"
/* Uncomment to manually inspect cache states */
+/* (Requires debug build of the library) */
/* #define EOC_MANUAL_INSPECTION */
const char *FILENAMES[] = {