path: root/src/H5Cmpio.c
author     mainzer <mainzer#hdfgroup.org>    2020-08-05 20:39:49 (GMT)
committer  mainzer <mainzer#hdfgroup.org>    2020-08-05 20:39:49 (GMT)
commit     33f35183cbfdde70ee8f803acb5b735ad4dfe086 (patch)
tree       e18d05c2c6b34f4baba1d4b28250dc4cf9d51171 /src/H5Cmpio.c
parent     50f404c887118577034c6412aeaaa1f6db2fe475 (diff)
When flushing, the metadata cache attempts to flush entries in increasing address order. To facilitate this, the metadata cache needs a list of dirty entries in increasing address order. This is implemented via a skip list of all dirty entries in the cache. To date, this skip list has been maintained at all times. However, profiling indicates that we can avoid significant overhead by constructing the skip list of dirty entries just before a flush, taking it down afterwards, and not maintaining it during normal operation.

This commit implements this optimization for both serial and parallel.

Tested serial and parallel, debug and production, on charis and jelly.
Diffstat (limited to 'src/H5Cmpio.c')
-rw-r--r--    src/H5Cmpio.c    391
1 file changed, 235 insertions, 156 deletions
diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c
index 199c494..31dc647 100644
--- a/src/H5Cmpio.c
+++ b/src/H5Cmpio.c
@@ -18,7 +18,7 @@
* Quincey Koziol
*
* Purpose: Functions in this file implement support for parallel I/O for
- * generic cache code.
+ * generic cache code.
*
*-------------------------------------------------------------------------
*/
@@ -28,20 +28,20 @@
/****************/
#include "H5Cmodule.h" /* This source code file is part of the H5C module */
-#define H5F_FRIEND /*suppress error about including H5Fpkg */
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
/***********/
/* Headers */
/***********/
-#include "H5private.h" /* Generic Functions */
+#include "H5private.h" /* Generic Functions */
#include "H5ACprivate.h" /* Metadata cache */
-#include "H5Cpkg.h" /* Cache */
+#include "H5Cpkg.h" /* Cache */
#include "H5CXprivate.h" /* API Contexts */
-#include "H5Eprivate.h" /* Error handling */
-#include "H5Fpkg.h" /* Files */
-#include "H5FDprivate.h" /* File drivers */
-#include "H5MMprivate.h" /* Memory management */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5Fpkg.h" /* Files */
+#include "H5FDprivate.h" /* File drivers */
+#include "H5MMprivate.h" /* Memory management */
#ifdef H5_HAVE_PARALLEL
@@ -87,74 +87,74 @@ static herr_t H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring,
*
* Purpose: Apply the supplied candidate list.
*
- * We used to do this by simply having each process write
- * every mpi_size-th entry in the candidate list, starting
- * at index mpi_rank, and mark all the others clean.
+ * We used to do this by simply having each process write
+ * every mpi_size-th entry in the candidate list, starting
+ * at index mpi_rank, and mark all the others clean.
*
- * However, this can cause unnecessary contention in a file
- * system by increasing the number of processes writing to
- * adjacent locations in the HDF5 file.
+ * However, this can cause unnecessary contention in a file
+ * system by increasing the number of processes writing to
+ * adjacent locations in the HDF5 file.
*
- * To attempt to minimize this, we now arange matters such
- * that each process writes n adjacent entries in the
- * candidate list, and marks all others clean. We must do
- * this in such a fashion as to guarantee that each entry
- * on the candidate list is written by exactly one process,
- * and marked clean by all others.
+ * To attempt to minimize this, we now arrange matters such
+ * that each process writes n adjacent entries in the
+ * candidate list, and marks all others clean. We must do
+ * this in such a fashion as to guarantee that each entry
+ * on the candidate list is written by exactly one process,
+ * and marked clean by all others.
*
- * To do this, first construct a table mapping mpi_rank
- * to the index of the first entry in the candidate list to
- * be written by the process of that mpi_rank, and then use
- * the table to control which entries are written and which
- * are marked as clean as a function of the mpi_rank.
+ * To do this, first construct a table mapping mpi_rank
+ * to the index of the first entry in the candidate list to
+ * be written by the process of that mpi_rank, and then use
+ * the table to control which entries are written and which
+ * are marked as clean as a function of the mpi_rank.
*
- * Note that the table must be identical on all processes, as
- * all see the same candidate list, mpi_size, and mpi_rank --
- * the inputs used to construct the table.
+ * Note that the table must be identical on all processes, as
+ * all see the same candidate list, mpi_size, and mpi_rank --
+ * the inputs used to construct the table.
*
- * We construct the table as follows. Let:
+ * We construct the table as follows. Let:
*
- * n = num_candidates / mpi_size;
+ * n = num_candidates / mpi_size;
*
- * m = num_candidates % mpi_size;
+ * m = num_candidates % mpi_size;
*
- * Now allocate an array of integers of length mpi_size + 1,
- * and call this array candidate_assignment_table.
+ * Now allocate an array of integers of length mpi_size + 1,
+ * and call this array candidate_assignment_table.
*
- * Conceptually, if the number of candidates is a multiple
- * of the mpi_size, we simply pass through the candidate list
- * and assign n entries to each process to flush, with the
- * index of the first entry to flush in the location in
- * the candidate_assignment_table indicated by the mpi_rank
- * of the process.
+ * Conceptually, if the number of candidates is a multiple
+ * of the mpi_size, we simply pass through the candidate list
+ * and assign n entries to each process to flush, with the
+ * index of the first entry to flush in the location in
+ * the candidate_assignment_table indicated by the mpi_rank
+ * of the process.
*
- * In the more common case in which the candidate list isn't
- * isn't a multiple of the mpi_size, we pretend it is, and
- * give num_candidates % mpi_size processes one extra entry
- * each to make things work out.
+ * In the more common case in which the candidate list
+ * isn't a multiple of the mpi_size, we pretend it is, and
+ * give num_candidates % mpi_size processes one extra entry
+ * each to make things work out.
*
- * Once the table is constructed, we determine the first and
- * last entry this process is to flush as follows:
+ * Once the table is constructed, we determine the first and
+ * last entry this process is to flush as follows:
*
- * first_entry_to_flush = candidate_assignment_table[mpi_rank]
+ * first_entry_to_flush = candidate_assignment_table[mpi_rank]
*
- * last_entry_to_flush =
- * candidate_assignment_table[mpi_rank + 1] - 1;
+ * last_entry_to_flush =
+ * candidate_assignment_table[mpi_rank + 1] - 1;
*
- * With these values determined, we simply scan through the
- * candidate list, marking all entries in the range
- * [first_entry_to_flush, last_entry_to_flush] for flush,
- * and all others to be cleaned.
+ * With these values determined, we simply scan through the
+ * candidate list, marking all entries in the range
+ * [first_entry_to_flush, last_entry_to_flush] for flush,
+ * and all others to be cleaned.
*
- * Finally, we scan the LRU from tail to head, flushing
- * or marking clean the candidate entries as indicated.
- * If necessary, we scan the pinned list as well.
+ * Finally, we scan the LRU from tail to head, flushing
+ * or marking clean the candidate entries as indicated.
+ * If necessary, we scan the pinned list as well.
*
- * Note that this function will fail if any protected or
- * clean entries appear on the candidate list.
+ * Note that this function will fail if any protected or
+ * clean entries appear on the candidate list.
*
- * This function is used in managing sync points, and
- * shouldn't be used elsewhere.
+ * This function is used in managing sync points, and
+ * shouldn't be used elsewhere.
*
* Return: Success: SUCCEED
*
@@ -163,6 +163,10 @@ static herr_t H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring,
* Programmer: John Mainzer
* 3/17/10
*
+ * Changes: Updated sanity checks to allow for the possibility that
+ * the slist is disabled.
+ * JRM -- 8/3/20
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -183,14 +187,17 @@ H5C_apply_candidate_list(H5F_t * f,
unsigned * candidate_assignment_table = NULL;
unsigned entries_to_flush[H5C_RING_NTYPES];
unsigned entries_to_clear[H5C_RING_NTYPES];
- haddr_t addr;
- H5C_cache_entry_t * entry_ptr = NULL;
+ haddr_t addr;
+ H5C_cache_entry_t * entry_ptr = NULL;
+
#if H5C_DO_SANITY_CHECKS
- haddr_t last_addr;
+ haddr_t last_addr;
#endif /* H5C_DO_SANITY_CHECKS */
+
#if H5C_APPLY_CANDIDATE_LIST__DEBUG
- char tbl_buf[1024];
+ char tbl_buf[1024];
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
+
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
@@ -200,7 +207,8 @@ H5C_apply_candidate_list(H5F_t * f,
HDassert(cache_ptr != NULL);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(num_candidates > 0);
- HDassert(num_candidates <= cache_ptr->slist_len);
+ HDassert( ( ! cache_ptr->slist_enabled ) ||
+ ( num_candidates <= cache_ptr->slist_len ));
HDassert(candidates_list_ptr != NULL);
HDassert(0 <= mpi_rank);
HDassert(mpi_rank < mpi_size);
@@ -410,13 +418,14 @@ done:
/*-------------------------------------------------------------------------
+ *
* Function: H5C_construct_candidate_list__clean_cache
*
* Purpose: Construct the list of entries that should be flushed to
- * clean all entries in the cache.
+ * clean all entries in the cache.
*
- * This function is used in managing sync points, and
- * shouldn't be used elsewhere.
+ * This function is used in managing sync points, and
+ * shouldn't be used elsewhere.
*
* Return: Success: SUCCEED
*
@@ -425,6 +434,16 @@ done:
* Programmer: John Mainzer
* 3/17/10
*
+ * Changes: With the slist optimization, the slist is not maintained
+ * unless a flush is in progress. Thus we can no longer use
+ * cache_ptr->slist_size to determine the total size of
+ * the entries we must insert in the candidate list.
+ *
+ * To address this, we now use cache_ptr->dirty_index_size
+ * instead.
+ *
+ * JRM -- 7/27/20
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -438,60 +457,82 @@ H5C_construct_candidate_list__clean_cache(H5C_t * cache_ptr)
HDassert( cache_ptr != NULL );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- /* As a sanity check, set space needed to the size of the skip list.
- * This should be the sum total of the sizes of all the dirty entries
- * in the metadata cache.
+ /* As a sanity check, set space needed to the dirty_index_size. This
+ * should be the sum total of the sizes of all the dirty entries
+ * in the metadata cache. Note that if the slist is enabled,
+ * cache_ptr->slist_size should equal cache_ptr->dirty_index_size.
*/
- space_needed = cache_ptr->slist_size;
+ space_needed = cache_ptr->dirty_index_size;
+
+ HDassert( ( ! cache_ptr->slist_enabled ) ||
+ ( space_needed == cache_ptr->slist_size ) );
+
/* Recall that while we shouldn't have any protected entries at this
* point, it is possible that some dirty entries may reside on the
* pinned list at this point.
*/
- HDassert( cache_ptr->slist_size <=
+ HDassert( cache_ptr->dirty_index_size <=
(cache_ptr->dLRU_list_size + cache_ptr->pel_size) );
- HDassert( cache_ptr->slist_len <=
- (cache_ptr->dLRU_list_len + cache_ptr->pel_len) );
+ HDassert( ( ! cache_ptr->slist_enabled ) ||
+ ( cache_ptr->slist_len <=
+ (cache_ptr->dLRU_list_len + cache_ptr->pel_len) ) );
+
if(space_needed > 0) { /* we have work to do */
+
H5C_cache_entry_t *entry_ptr;
unsigned nominated_entries_count = 0;
size_t nominated_entries_size = 0;
- haddr_t nominated_addr;
+ haddr_t nominated_addr;
- HDassert( cache_ptr->slist_len > 0 );
+ HDassert( ( ! cache_ptr->slist_enabled ) ||
+ ( cache_ptr->slist_len > 0 ) );
/* Scan the dirty LRU list from tail forward and nominate sufficient
* entries to free up the necessary space.
*/
entry_ptr = cache_ptr->dLRU_tail_ptr;
- while((nominated_entries_size < space_needed) &&
- (nominated_entries_count < cache_ptr->slist_len) &&
- (entry_ptr != NULL)) {
+
+ while ( ( nominated_entries_size < space_needed ) &&
+ ( ( ! cache_ptr->slist_enabled ) ||
+ ( nominated_entries_count < cache_ptr->slist_len ) ) &&
+ ( entry_ptr != NULL ) ) {
+
HDassert( ! (entry_ptr->is_protected) );
HDassert( ! (entry_ptr->is_read_only) );
HDassert( entry_ptr->ro_ref_count == 0 );
HDassert( entry_ptr->is_dirty );
- HDassert( entry_ptr->in_slist );
+ HDassert( ( ! cache_ptr->slist_enabled ) ||
+ ( entry_ptr->in_slist ) );
nominated_addr = entry_ptr->addr;
+
if(H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed")
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5AC_add_candidate() failed")
nominated_entries_size += entry_ptr->size;
nominated_entries_count++;
entry_ptr = entry_ptr->aux_prev;
+
} /* end while */
+
HDassert( entry_ptr == NULL );
/* it is possible that there are some dirty entries on the
* protected entry list as well -- scan it too if necessary
*/
entry_ptr = cache_ptr->pel_head_ptr;
- while((nominated_entries_size < space_needed) &&
- (nominated_entries_count < cache_ptr->slist_len) &&
- (entry_ptr != NULL)) {
+
+ while ( ( nominated_entries_size < space_needed ) &&
+ ( ( ! cache_ptr->slist_enabled ) ||
+ ( nominated_entries_count < cache_ptr->slist_len ) ) &&
+ ( entry_ptr != NULL ) ) {
+
if(entry_ptr->is_dirty) {
+
HDassert( ! (entry_ptr->is_protected) );
HDassert( ! (entry_ptr->is_read_only) );
HDassert( entry_ptr->ro_ref_count == 0 );
@@ -499,22 +540,31 @@ H5C_construct_candidate_list__clean_cache(H5C_t * cache_ptr)
HDassert( entry_ptr->in_slist );
nominated_addr = entry_ptr->addr;
+
if(H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed")
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5AC_add_candidate() failed")
nominated_entries_size += entry_ptr->size;
nominated_entries_count++;
+
} /* end if */
entry_ptr = entry_ptr->next;
+
} /* end while */
- HDassert( nominated_entries_count == cache_ptr->slist_len );
+ HDassert( ( ! cache_ptr->slist_enabled ) ||
+ ( nominated_entries_count == cache_ptr->slist_len ) );
HDassert( nominated_entries_size == space_needed );
+
} /* end if */
done:
+
FUNC_LEAVE_NOAPI(ret_value)
+
} /* H5C_construct_candidate_list__clean_cache() */
@@ -522,10 +572,10 @@ done:
* Function: H5C_construct_candidate_list__min_clean
*
* Purpose: Construct the list of entries that should be flushed to
- * get the cache back within its min clean constraints.
+ * get the cache back within its min clean constraints.
*
- * This function is used in managing sync points, and
- * shouldn't be used elsewhere.
+ * This function is used in managing sync points, and
+ * shouldn't be used elsewhere.
*
* Return: Success: SUCCEED
*
@@ -534,6 +584,12 @@ done:
* Programmer: John Mainzer
* 3/17/10
*
+ * Changes: With the slist optimization, the slist is not maintained
+ * unless a flush is in progress. Updated sanity checks to
+ * reflect this.
+ *
+ * JRM -- 7/27/20
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -551,54 +607,77 @@ H5C_construct_candidate_list__min_clean(H5C_t * cache_ptr)
* cache back within its min clean constraints.
*/
if(cache_ptr->max_cache_size > cache_ptr->index_size) {
- if(((cache_ptr->max_cache_size - cache_ptr->index_size) +
- cache_ptr->cLRU_list_size) >= cache_ptr->min_clean_size)
+
+ if ( ( (cache_ptr->max_cache_size - cache_ptr->index_size) +
+ cache_ptr->cLRU_list_size) >= cache_ptr->min_clean_size ) {
+
space_needed = 0;
- else
+
+ } else {
+
space_needed = cache_ptr->min_clean_size -
((cache_ptr->max_cache_size - cache_ptr->index_size) +
cache_ptr->cLRU_list_size);
+ }
} /* end if */
else {
- if(cache_ptr->min_clean_size <= cache_ptr->cLRU_list_size)
+
+ if(cache_ptr->min_clean_size <= cache_ptr->cLRU_list_size) {
+
space_needed = 0;
- else
+
+ } else {
+
space_needed = cache_ptr->min_clean_size -
cache_ptr->cLRU_list_size;
+ }
} /* end else */
if(space_needed > 0) { /* we have work to do */
+
H5C_cache_entry_t *entry_ptr;
unsigned nominated_entries_count = 0;
size_t nominated_entries_size = 0;
- HDassert( cache_ptr->slist_len > 0 );
+ HDassert( ( ! cache_ptr->slist_enabled ) ||
+ ( cache_ptr->slist_len > 0 ) );
/* Scan the dirty LRU list from tail forward and nominate sufficient
* entries to free up the necessary space.
*/
entry_ptr = cache_ptr->dLRU_tail_ptr;
- while((nominated_entries_size < space_needed) &&
- (nominated_entries_count < cache_ptr->slist_len) &&
- (entry_ptr != NULL) &&
- (!entry_ptr->flush_me_last)) {
- haddr_t nominated_addr;
+
+ while ( ( nominated_entries_size < space_needed ) &&
+ ( ( ! cache_ptr->slist_enabled ) ||
+ ( nominated_entries_count < cache_ptr->slist_len ) ) &&
+ ( entry_ptr != NULL ) &&
+ ( ! entry_ptr->flush_me_last ) ) {
+
+ haddr_t nominated_addr;
HDassert( ! (entry_ptr->is_protected) );
HDassert( ! (entry_ptr->is_read_only) );
HDassert( entry_ptr->ro_ref_count == 0 );
HDassert( entry_ptr->is_dirty );
- HDassert( entry_ptr->in_slist );
+ HDassert( ( ! cache_ptr->slist_enabled ) ||
+ ( entry_ptr->in_slist ) );
nominated_addr = entry_ptr->addr;
+
if(H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed")
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5AC_add_candidate() failed")
nominated_entries_size += entry_ptr->size;
nominated_entries_count++;
entry_ptr = entry_ptr->aux_prev;
+
} /* end while */
- HDassert( nominated_entries_count <= cache_ptr->slist_len );
+
+ HDassert( ( ! cache_ptr->slist_enabled ) ||
+ ( nominated_entries_count <= cache_ptr->slist_len ) );
+ HDassert( nominated_entries_size <= cache_ptr->dirty_index_size );
HDassert( nominated_entries_size >= space_needed );
} /* end if */
@@ -612,24 +691,24 @@ done:
* Function: H5C_mark_entries_as_clean
*
* Purpose: When the H5C code is used to implement the metadata caches
- * in PHDF5, only the cache with MPI_rank 0 is allowed to
- * actually write entries to disk -- all other caches must
- * retain dirty entries until they are advised that the
- * entries are clean.
+ * in PHDF5, only the cache with MPI_rank 0 is allowed to
+ * actually write entries to disk -- all other caches must
+ * retain dirty entries until they are advised that the
+ * entries are clean.
*
- * This function exists to allow the H5C code to receive these
- * notifications.
+ * This function exists to allow the H5C code to receive these
+ * notifications.
*
- * The function receives a list of entry base addresses
- * which must refer to dirty entries in the cache. If any
- * of the entries are either clean or don't exist, the
- * function flags an error.
+ * The function receives a list of entry base addresses
+ * which must refer to dirty entries in the cache. If any
+ * of the entries are either clean or don't exist, the
+ * function flags an error.
*
- * The function scans the list of entries and flushes all
- * those that are currently unprotected with the
- * H5C__FLUSH_CLEAR_ONLY_FLAG. Those that are currently
- * protected are flagged for clearing when they are
- * unprotected.
+ * The function scans the list of entries and flushes all
+ * those that are currently unprotected with the
+ * H5C__FLUSH_CLEAR_ONLY_FLAG. Those that are currently
+ * protected are flagged for clearing when they are
+ * unprotected.
*
* Return: Non-negative on success/Negative on failure
*
@@ -644,22 +723,22 @@ H5C_mark_entries_as_clean(H5F_t * f,
haddr_t * ce_array_ptr)
{
H5C_t * cache_ptr;
- unsigned entries_cleared;
+ unsigned entries_cleared;
unsigned pinned_entries_cleared;
hbool_t progress;
- unsigned entries_examined;
- unsigned initial_list_len;
- haddr_t addr;
- unsigned pinned_entries_marked = 0;
+ unsigned entries_examined;
+ unsigned initial_list_len;
+ haddr_t addr;
+ unsigned pinned_entries_marked = 0;
#if H5C_DO_SANITY_CHECKS
- unsigned protected_entries_marked = 0;
- unsigned other_entries_marked = 0;
- haddr_t last_addr;
+ unsigned protected_entries_marked = 0;
+ unsigned other_entries_marked = 0;
+ haddr_t last_addr;
#endif /* H5C_DO_SANITY_CHECKS */
- H5C_cache_entry_t * clear_ptr = NULL;
- H5C_cache_entry_t * entry_ptr = NULL;
+ H5C_cache_entry_t * clear_ptr = NULL;
+ H5C_cache_entry_t * entry_ptr = NULL;
unsigned u;
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -706,7 +785,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
if(entry_ptr == NULL) {
#if H5C_DO_SANITY_CHECKS
- HDfprintf(stdout,
+ HDfprintf(stdout,
"H5C_mark_entries_as_clean: entry[%u] = %a not in cache.\n",
u,
addr);
@@ -715,7 +794,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
} /* end if */
else if(!entry_ptr->is_dirty) {
#if H5C_DO_SANITY_CHECKS
- HDfprintf(stdout,
+ HDfprintf(stdout,
"H5C_mark_entries_as_clean: entry %a is not dirty!?!\n",
addr);
#endif /* H5C_DO_SANITY_CHECKS */
@@ -735,13 +814,13 @@ H5C_mark_entries_as_clean(H5F_t * f,
} /* end if */
entry_ptr->clear_on_unprotect = TRUE;
- if(entry_ptr->is_pinned)
- pinned_entries_marked++;
+ if(entry_ptr->is_pinned)
+ pinned_entries_marked++;
#if H5C_DO_SANITY_CHECKS
- else if(entry_ptr->is_protected)
- protected_entries_marked++;
- else
- other_entries_marked++;
+ else if(entry_ptr->is_protected)
+ protected_entries_marked++;
+ else
+ other_entries_marked++;
#endif /* H5C_DO_SANITY_CHECKS */
}
}
@@ -769,7 +848,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
* of the pre_serialize / serialize routines, this may
* cease to be the case -- requiring a review of this
* point.
- * JRM -- 4/7/15
+ * JRM -- 4/7/15
*/
entries_cleared = 0;
entries_examined = 0;
@@ -873,9 +952,9 @@ done:
herr_t
H5C_clear_coll_entries(H5C_t *cache_ptr, hbool_t partial)
{
- uint32_t clear_cnt;
- H5C_cache_entry_t * entry_ptr = NULL;
- herr_t ret_value = SUCCEED;
+ uint32_t clear_cnt;
+ H5C_cache_entry_t * entry_ptr = NULL;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_NOAPI_NOINIT
@@ -1084,7 +1163,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C__flush_candidate_entries
*
- * Purpose: Flush or clear (as indicated) the candidate entries that
+ * Purpose: Flush or clear (as indicated) the candidate entries that
* have been marked in the metadata cache. In so doing,
* observe rings and flush dependencies.
*
@@ -1113,9 +1192,9 @@ done:
* Return: Non-negative on success/Negative on failure.
*
* Programmer: John Mainzer
- * 2/10/17
+ * 2/10/17
*
- * Changes: None.
+ * Changes: None.
*
*-------------------------------------------------------------------------
*/
@@ -1124,17 +1203,17 @@ H5C__flush_candidate_entries(H5F_t *f, unsigned entries_to_flush[H5C_RING_NTYPES
unsigned entries_to_clear[H5C_RING_NTYPES])
{
#if H5C_DO_SANITY_CHECKS
- int i;
- uint32_t index_len = 0;
- size_t index_size = (size_t)0;
- size_t clean_index_size = (size_t)0;
- size_t dirty_index_size = (size_t)0;
- size_t slist_size = (size_t)0;
- uint32_t slist_len = 0;
+ int i;
+ uint32_t index_len = 0;
+ size_t index_size = (size_t)0;
+ size_t clean_index_size = (size_t)0;
+ size_t dirty_index_size = (size_t)0;
+ size_t slist_size = (size_t)0;
+ uint32_t slist_len = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- H5C_ring_t ring;
+ H5C_ring_t ring;
H5C_t * cache_ptr;
- herr_t ret_value = SUCCEED;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_STATIC
@@ -1164,7 +1243,7 @@ H5C__flush_candidate_entries(H5F_t *f, unsigned entries_to_flush[H5C_RING_NTYPES
clean_index_size += cache_ptr->clean_index_ring_size[i];
dirty_index_size += cache_ptr->dirty_index_ring_size[i];
- slist_len += cache_ptr->slist_ring_len[i];
+ slist_len += cache_ptr->slist_ring_len[i];
slist_size += cache_ptr->slist_ring_size[i];
} /* end for */
@@ -1206,7 +1285,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C__flush_candidates_in_ring
*
- * Purpose: Flush or clear (as indicated) the candidate entries
+ * Purpose: Flush or clear (as indicated) the candidate entries
* contained in the specified cache and ring. All candidate
* entries in rings outside the specified ring must have been
* flushed (or cleared) on entry.
@@ -1235,7 +1314,7 @@ done:
* Return: Non-negative on success/Negative on failure.
*
* Programmer: John Mainzer
- * 2/10/17
+ * 2/10/17
*
*-------------------------------------------------------------------------
*/