author     Allen Byrne <byrn@hdfgroup.org>  2020-09-30 14:27:10 (GMT)
committer  Allen Byrne <byrn@hdfgroup.org>  2020-09-30 14:27:10 (GMT)
commit     b2d661b508a7fc7a2592c13bc6bdc175551f075d (patch)
tree       13baeb0d83a7c2a4c6299993c182b1227c2f6114 /src/H5Cmpio.c
parent     29ab58b58dce556639ea3154e262895773a8a8df (diff)
download   hdf5-b2d661b508a7fc7a2592c13bc6bdc175551f075d.zip
           hdf5-b2d661b508a7fc7a2592c13bc6bdc175551f075d.tar.gz
           hdf5-b2d661b508a7fc7a2592c13bc6bdc175551f075d.tar.bz2
Clang-format of source files
Diffstat (limited to 'src/H5Cmpio.c')
-rw-r--r--  src/H5Cmpio.c  767
1 file changed, 357 insertions(+), 410 deletions(-)
diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c
index 9171cf7..4a3a2d8 100644
--- a/src/H5Cmpio.c
+++ b/src/H5Cmpio.c
@@ -27,22 +27,20 @@
/* Module Setup */
/****************/
-#include "H5Cmodule.h" /* This source code file is part of the H5C module */
-#define H5F_FRIEND /*suppress error about including H5Fpkg */
-
+#include "H5Cmodule.h" /* This source code file is part of the H5C module */
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
/***********/
/* Headers */
/***********/
-#include "H5private.h" /* Generic Functions */
-#include "H5ACprivate.h" /* Metadata cache */
-#include "H5Cpkg.h" /* Cache */
-#include "H5CXprivate.h" /* API Contexts */
-#include "H5Eprivate.h" /* Error handling */
-#include "H5Fpkg.h" /* Files */
-#include "H5FDprivate.h" /* File drivers */
-#include "H5MMprivate.h" /* Memory management */
-
+#include "H5private.h" /* Generic Functions */
+#include "H5ACprivate.h" /* Metadata cache */
+#include "H5Cpkg.h" /* Cache */
+#include "H5CXprivate.h" /* API Contexts */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5Fpkg.h" /* Files */
+#include "H5FDprivate.h" /* File drivers */
+#include "H5MMprivate.h" /* Memory management */
#ifdef H5_HAVE_PARALLEL
/****************/
@@ -50,38 +48,31 @@
/****************/
#define H5C_APPLY_CANDIDATE_LIST__DEBUG 0
-
/******************/
/* Local Typedefs */
/******************/
-
/********************/
/* Local Prototypes */
/********************/
static herr_t H5C__collective_write(H5F_t *f);
static herr_t H5C__flush_candidate_entries(H5F_t *f, unsigned entries_to_flush[H5C_RING_NTYPES],
- unsigned entries_to_clear[H5C_RING_NTYPES]);
-static herr_t H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring,
- unsigned entries_to_flush, unsigned entries_to_clear);
-
+ unsigned entries_to_clear[H5C_RING_NTYPES]);
+static herr_t H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned entries_to_flush,
+ unsigned entries_to_clear);
/*********************/
/* Package Variables */
/*********************/
-
/*****************************/
/* Library Private Variables */
/*****************************/
-
/*******************/
/* Local Variables */
/*******************/
-
-
/*-------------------------------------------------------------------------
* Function: H5C_apply_candidate_list
*
@@ -163,43 +154,39 @@ static herr_t H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring,
* Programmer: John Mainzer
* 3/17/10
*
- * Changes: Updated sanity checks to allow for the possibility that
+ * Changes: Updated sanity checks to allow for the possibility that
* the slist is disabled.
* JRM -- 8/3/20
*
*-------------------------------------------------------------------------
*/
herr_t
-H5C_apply_candidate_list(H5F_t * f,
- H5C_t * cache_ptr,
- unsigned num_candidates,
- haddr_t * candidates_list_ptr,
- int mpi_rank,
- int mpi_size)
+H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, haddr_t *candidates_list_ptr,
+ int mpi_rank, int mpi_size)
{
- int i;
- int m;
- unsigned n;
- unsigned first_entry_to_flush;
- unsigned last_entry_to_flush;
- unsigned total_entries_to_clear = 0;
- unsigned total_entries_to_flush = 0;
- unsigned * candidate_assignment_table = NULL;
- unsigned entries_to_flush[H5C_RING_NTYPES];
- unsigned entries_to_clear[H5C_RING_NTYPES];
- haddr_t addr;
- H5C_cache_entry_t * entry_ptr = NULL;
+ int i;
+ int m;
+ unsigned n;
+ unsigned first_entry_to_flush;
+ unsigned last_entry_to_flush;
+ unsigned total_entries_to_clear = 0;
+ unsigned total_entries_to_flush = 0;
+ unsigned * candidate_assignment_table = NULL;
+ unsigned entries_to_flush[H5C_RING_NTYPES];
+ unsigned entries_to_clear[H5C_RING_NTYPES];
+ haddr_t addr;
+ H5C_cache_entry_t *entry_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
- haddr_t last_addr;
+ haddr_t last_addr;
#endif /* H5C_DO_SANITY_CHECKS */
#if H5C_APPLY_CANDIDATE_LIST__DEBUG
- char tbl_buf[1024];
+ char tbl_buf[1024];
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
- unsigned u; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -207,8 +194,7 @@ H5C_apply_candidate_list(H5F_t * f,
HDassert(cache_ptr != NULL);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(num_candidates > 0);
- HDassert( ( ! cache_ptr->slist_enabled ) ||
- ( num_candidates <= cache_ptr->slist_len ));
+ HDassert((!cache_ptr->slist_enabled) || (num_candidates <= cache_ptr->slist_len));
HDassert(candidates_list_ptr != NULL);
HDassert(0 <= mpi_rank);
HDassert(mpi_rank < mpi_size);
@@ -223,93 +209,95 @@ H5C_apply_candidate_list(H5F_t * f,
HDmemset(tbl_buf, 0, sizeof(tbl_buf));
HDsprintf(&(tbl_buf[0]), "candidate list = ");
- for(u = 0; u < num_candidates; u++)
+ for (u = 0; u < num_candidates; u++)
HDsprintf(&(tbl_buf[HDstrlen(tbl_buf)]), " 0x%llx", (long long)(*(candidates_list_ptr + u)));
HDsprintf(&(tbl_buf[HDstrlen(tbl_buf)]), "\n");
HDfprintf(stdout, "%s", tbl_buf);
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
- if(f->shared->coll_md_write) {
+ if (f->shared->coll_md_write) {
/* Sanity check */
HDassert(NULL == cache_ptr->coll_write_list);
/* Create skip list of entries for collective write */
- if(NULL == (cache_ptr->coll_write_list = H5SL_create(H5SL_TYPE_HADDR, NULL)))
+ if (NULL == (cache_ptr->coll_write_list = H5SL_create(H5SL_TYPE_HADDR, NULL)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "can't create skip list for entries")
} /* end if */
n = num_candidates / (unsigned)mpi_size;
- if(num_candidates % (unsigned)mpi_size > INT_MAX)
+ if (num_candidates % (unsigned)mpi_size > INT_MAX)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "m overflow")
m = (int)(num_candidates % (unsigned)mpi_size);
- if(NULL == (candidate_assignment_table = (unsigned *)H5MM_malloc(sizeof(unsigned) * (size_t)(mpi_size + 1))))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for candidate assignment table")
+ if (NULL ==
+ (candidate_assignment_table = (unsigned *)H5MM_malloc(sizeof(unsigned) * (size_t)(mpi_size + 1))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
+ "memory allocation failed for candidate assignment table")
- candidate_assignment_table[0] = 0;
+ candidate_assignment_table[0] = 0;
candidate_assignment_table[mpi_size] = num_candidates;
- if(m == 0) { /* mpi_size is an even divisor of num_candidates */
- for(i = 1; i < mpi_size; i++)
+ if (m == 0) { /* mpi_size is an even divisor of num_candidates */
+ for (i = 1; i < mpi_size; i++)
candidate_assignment_table[i] = candidate_assignment_table[i - 1] + n;
} /* end if */
else {
- for(i = 1; i <= m; i++)
+ for (i = 1; i <= m; i++)
candidate_assignment_table[i] = candidate_assignment_table[i - 1] + n + 1;
- if(num_candidates < (unsigned)mpi_size) {
- for(i = m + 1; i < mpi_size; i++)
+ if (num_candidates < (unsigned)mpi_size) {
+ for (i = m + 1; i < mpi_size; i++)
candidate_assignment_table[i] = num_candidates;
} /* end if */
else {
- for(i = m + 1; i < mpi_size; i++)
+ for (i = m + 1; i < mpi_size; i++)
candidate_assignment_table[i] = candidate_assignment_table[i - 1] + n;
} /* end else */
- } /* end else */
+ } /* end else */
HDassert((candidate_assignment_table[mpi_size - 1] + n) == num_candidates);
#if H5C_DO_SANITY_CHECKS
/* Verify that the candidate assignment table has the expected form */
- for(i = 1; i < mpi_size - 1; i++) {
+ for (i = 1; i < mpi_size - 1; i++) {
unsigned a, b;
a = candidate_assignment_table[i] - candidate_assignment_table[i - 1];
b = candidate_assignment_table[i + 1] - candidate_assignment_table[i];
- HDassert( n + 1 >= a );
- HDassert( a >= b );
- HDassert( b >= n );
+ HDassert(n + 1 >= a);
+ HDassert(a >= b);
+ HDassert(b >= n);
}
#endif /* H5C_DO_SANITY_CHECKS */
first_entry_to_flush = candidate_assignment_table[mpi_rank];
- last_entry_to_flush = candidate_assignment_table[mpi_rank + 1] - 1;
+ last_entry_to_flush = candidate_assignment_table[mpi_rank + 1] - 1;
#if H5C_APPLY_CANDIDATE_LIST__DEBUG
- for ( i = 0; i < 1024; i++ )
+ for (i = 0; i < 1024; i++)
tbl_buf[i] = '\0';
HDsprintf(&(tbl_buf[0]), "candidate assignment table = ");
- for(i = 0; i <= mpi_size; i++)
+ for (i = 0; i <= mpi_size; i++)
HDsprintf(&(tbl_buf[HDstrlen(tbl_buf)]), " %u", candidate_assignment_table[i]);
HDsprintf(&(tbl_buf[HDstrlen(tbl_buf)]), "\n");
HDfprintf(stdout, "%s", tbl_buf);
- HDfprintf(stdout, "%s:%d: flush entries [%u, %u].\n",
- FUNC, mpi_rank, first_entry_to_flush, last_entry_to_flush);
+ HDfprintf(stdout, "%s:%d: flush entries [%u, %u].\n", FUNC, mpi_rank, first_entry_to_flush,
+ last_entry_to_flush);
HDfprintf(stdout, "%s:%d: marking entries.\n", FUNC, mpi_rank);
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
- for(u = 0; u < num_candidates; u++) {
+ for (u = 0; u < num_candidates; u++) {
addr = candidates_list_ptr[u];
HDassert(H5F_addr_defined(addr));
#if H5C_DO_SANITY_CHECKS
- if(u > 0) {
- if(last_addr == addr)
+ if (u > 0) {
+ if (last_addr == addr)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "duplicate entry in cleaned list")
- else if(last_addr > addr)
+ else if (last_addr > addr)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "candidate list not sorted")
} /* end if */
@@ -317,11 +305,11 @@ H5C_apply_candidate_list(H5F_t * f,
#endif /* H5C_DO_SANITY_CHECKS */
H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
- if(entry_ptr == NULL)
+ if (entry_ptr == NULL)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "listed candidate entry not in cache?!?!?")
- if(!entry_ptr->is_dirty)
+ if (!entry_ptr->is_dirty)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry not dirty?!?!?")
- if(entry_ptr->is_protected)
+ if (entry_ptr->is_protected)
/* For now at least, we can't deal with protected entries.
* If we encounter one, scream and die. If it becomes an
* issue, we should be able to work around this.
@@ -340,7 +328,7 @@ H5C_apply_candidate_list(H5F_t * f,
* pinned list shortly, and clear or flush according to these
* markings.
*/
- if(u >= first_entry_to_flush && u <= last_entry_to_flush) {
+ if (u >= first_entry_to_flush && u <= last_entry_to_flush) {
total_entries_to_flush++;
entries_to_flush[entry_ptr->ring]++;
entry_ptr->flush_immediately = TRUE;
@@ -357,16 +345,16 @@ H5C_apply_candidate_list(H5F_t * f,
* candidate list is collective and uniform across all
* ranks.
*/
- if(entry_ptr->coll_access) {
+ if (entry_ptr->coll_access) {
entry_ptr->coll_access = FALSE;
H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
} /* end if */
- } /* end for */
+ } /* end for */
#if H5C_DO_SANITY_CHECKS
m = 0;
n = 0;
- for(i = 0; i < H5C_RING_NTYPES; i++) {
+ for (i = 0; i < H5C_RING_NTYPES; i++) {
m += (int)entries_to_flush[i];
n += entries_to_clear[i];
} /* end if */
@@ -376,9 +364,8 @@ H5C_apply_candidate_list(H5F_t * f,
#endif /* H5C_DO_SANITY_CHECKS */
#if H5C_APPLY_CANDIDATE_LIST__DEBUG
- HDfprintf(stdout, "%s:%d: num candidates/to clear/to flush = %u/%u/%u.\n",
- FUNC, mpi_rank, num_candidates, total_entries_to_clear,
- total_entries_to_flush);
+ HDfprintf(stdout, "%s:%d: num candidates/to clear/to flush = %u/%u/%u.\n", FUNC, mpi_rank, num_candidates,
+ total_entries_to_clear, total_entries_to_flush);
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
/* We have now marked all the entries on the candidate list for
@@ -391,24 +378,24 @@ H5C_apply_candidate_list(H5F_t * f,
* If we don't do this, my experiments indicate that we will have a
* noticeably poorer hit ratio as a result.
*/
- if(H5C__flush_candidate_entries(f, entries_to_flush, entries_to_clear) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush candidates failed")
+ if (H5C__flush_candidate_entries(f, entries_to_flush, entries_to_clear) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush candidates failed")
/* If we've deferred writing to do it collectively, take care of that now */
- if(f->shared->coll_md_write) {
+ if (f->shared->coll_md_write) {
/* Sanity check */
HDassert(cache_ptr->coll_write_list);
/* Write collective list */
- if(H5C__collective_write(f) < 0)
+ if (H5C__collective_write(f) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_WRITEERROR, FAIL, "can't write metadata collectively")
} /* end if */
done:
- if(candidate_assignment_table != NULL)
+ if (candidate_assignment_table != NULL)
candidate_assignment_table = (unsigned *)H5MM_xfree((void *)candidate_assignment_table);
- if(cache_ptr->coll_write_list) {
- if(H5SL_close(cache_ptr->coll_write_list) < 0)
+ if (cache_ptr->coll_write_list) {
+ if (H5SL_close(cache_ptr->coll_write_list) < 0)
HDONE_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "failed to destroy skip list")
cache_ptr->coll_write_list = NULL;
} /* end if */
@@ -416,7 +403,6 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_apply_candidate_list() */
-
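(Aside, not part of the commit: the hunks above only reformat the logic that splits the candidate list across MPI ranks, where each rank receives a contiguous slice of roughly num_candidates / mpi_size entries and the first num_candidates % mpi_size ranks take one extra. A minimal standalone sketch of that partitioning follows; the helper and its names are illustrative, not HDF5 code.)

#include <stdio.h>

/* Build a (mpi_size + 1)-slot table; rank r flushes candidates in the
 * half-open range [table[r], table[r + 1]).
 */
static void
build_assignment_table(unsigned num_candidates, int mpi_size, unsigned *table)
{
    unsigned n = num_candidates / (unsigned)mpi_size; /* base share per rank */
    unsigned m = num_candidates % (unsigned)mpi_size; /* ranks that get +1   */
    int      i;

    table[0]        = 0;
    table[mpi_size] = num_candidates;
    for (i = 1; i < mpi_size; i++)
        table[i] = table[i - 1] + n + (((unsigned)i <= m) ? 1 : 0);
}

int
main(void)
{
    unsigned table[5]; /* mpi_size + 1 entries */
    int      rank;

    build_assignment_table(10, 4, table); /* 10 candidates over 4 ranks */
    for (rank = 0; rank < 4; rank++)
        printf("rank %d flushes entries [%u, %u]\n", rank, table[rank], table[rank + 1] - 1);
    return 0;
}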
/*-------------------------------------------------------------------------
*
* Function: H5C_construct_candidate_list__clean_cache
@@ -436,82 +422,73 @@ done:
*
* Changes: With the slist optimization, the slist is not maintained
* unless a flush is in progress. Thus we can not longer use
- * cache_ptr->slist_size to determine the total size of
+ * cache_ptr->slist_size to determine the total size of
* the entries we must insert in the candidate list.
*
* To address this, we now use cache_ptr->dirty_index_size
- * instead.
+ * instead.
*
* JRM -- 7/27/20
*
*-------------------------------------------------------------------------
*/
herr_t
-H5C_construct_candidate_list__clean_cache(H5C_t * cache_ptr)
+H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr)
{
- size_t space_needed;
- herr_t ret_value = SUCCEED; /* Return value */
+ size_t space_needed;
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
/* As a sanity check, set space needed to the dirty_index_size. This
* should be the sum total of the sizes of all the dirty entries
- * in the metadata cache. Note that if the slist is enabled,
+ * in the metadata cache. Note that if the slist is enabled,
* cache_ptr->slist_size should equal cache_ptr->dirty_index_size.
*/
space_needed = cache_ptr->dirty_index_size;
- HDassert( ( ! cache_ptr->slist_enabled ) ||
- ( space_needed == cache_ptr->slist_size ) );
-
+ HDassert((!cache_ptr->slist_enabled) || (space_needed == cache_ptr->slist_size));
/* Recall that while we shouldn't have any protected entries at this
* point, it is possible that some dirty entries may reside on the
* pinned list at this point.
*/
- HDassert( cache_ptr->dirty_index_size <=
- (cache_ptr->dLRU_list_size + cache_ptr->pel_size) );
- HDassert( ( ! cache_ptr->slist_enabled ) ||
- ( cache_ptr->slist_len <=
- (cache_ptr->dLRU_list_len + cache_ptr->pel_len) ) );
-
+ HDassert(cache_ptr->dirty_index_size <= (cache_ptr->dLRU_list_size + cache_ptr->pel_size));
+ HDassert((!cache_ptr->slist_enabled) ||
+ (cache_ptr->slist_len <= (cache_ptr->dLRU_list_len + cache_ptr->pel_len)));
- if(space_needed > 0) { /* we have work to do */
+ if (space_needed > 0) { /* we have work to do */
H5C_cache_entry_t *entry_ptr;
- unsigned nominated_entries_count = 0;
- size_t nominated_entries_size = 0;
- haddr_t nominated_addr;
+ unsigned nominated_entries_count = 0;
+ size_t nominated_entries_size = 0;
+ haddr_t nominated_addr;
- HDassert( ( ! cache_ptr->slist_enabled ) ||
- ( cache_ptr->slist_len > 0 ) );
+ HDassert((!cache_ptr->slist_enabled) || (cache_ptr->slist_len > 0));
/* Scan the dirty LRU list from tail forward and nominate sufficient
* entries to free up the necessary space.
*/
entry_ptr = cache_ptr->dLRU_tail_ptr;
- while ( ( nominated_entries_size < space_needed ) &&
- ( ( ! cache_ptr->slist_enabled ) ||
- ( nominated_entries_count < cache_ptr->slist_len ) ) &&
- ( entry_ptr != NULL ) ) {
+ while ((nominated_entries_size < space_needed) &&
+ ((!cache_ptr->slist_enabled) || (nominated_entries_count < cache_ptr->slist_len)) &&
+ (entry_ptr != NULL)) {
- HDassert( ! (entry_ptr->is_protected) );
- HDassert( ! (entry_ptr->is_read_only) );
- HDassert( entry_ptr->ro_ref_count == 0 );
- HDassert( entry_ptr->is_dirty );
- HDassert( ( ! cache_ptr->slist_enabled ) ||
- ( entry_ptr->in_slist ) );
+ HDassert(!(entry_ptr->is_protected));
+ HDassert(!(entry_ptr->is_read_only));
+ HDassert(entry_ptr->ro_ref_count == 0);
+ HDassert(entry_ptr->is_dirty);
+ HDassert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist));
nominated_addr = entry_ptr->addr;
- if(H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
+ if (H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5AC_add_candidate() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed")
nominated_entries_size += entry_ptr->size;
nominated_entries_count++;
@@ -519,32 +496,30 @@ H5C_construct_candidate_list__clean_cache(H5C_t * cache_ptr)
} /* end while */
- HDassert( entry_ptr == NULL );
+ HDassert(entry_ptr == NULL);
/* it is possible that there are some dirty entries on the
* protected entry list as well -- scan it too if necessary
*/
entry_ptr = cache_ptr->pel_head_ptr;
- while ( ( nominated_entries_size < space_needed ) &&
- ( ( ! cache_ptr->slist_enabled ) ||
- ( nominated_entries_count < cache_ptr->slist_len ) ) &&
- ( entry_ptr != NULL ) ) {
+ while ((nominated_entries_size < space_needed) &&
+ ((!cache_ptr->slist_enabled) || (nominated_entries_count < cache_ptr->slist_len)) &&
+ (entry_ptr != NULL)) {
- if(entry_ptr->is_dirty) {
+ if (entry_ptr->is_dirty) {
- HDassert( ! (entry_ptr->is_protected) );
- HDassert( ! (entry_ptr->is_read_only) );
- HDassert( entry_ptr->ro_ref_count == 0 );
- HDassert( entry_ptr->is_dirty );
- HDassert( entry_ptr->in_slist );
+ HDassert(!(entry_ptr->is_protected));
+ HDassert(!(entry_ptr->is_read_only));
+ HDassert(entry_ptr->ro_ref_count == 0);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(entry_ptr->in_slist);
nominated_addr = entry_ptr->addr;
- if(H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
+ if (H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5AC_add_candidate() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed")
nominated_entries_size += entry_ptr->size;
nominated_entries_count++;
@@ -555,9 +530,8 @@ H5C_construct_candidate_list__clean_cache(H5C_t * cache_ptr)
} /* end while */
- HDassert( ( ! cache_ptr->slist_enabled ) ||
- ( nominated_entries_count == cache_ptr->slist_len ) );
- HDassert( nominated_entries_size == space_needed );
+ HDassert((!cache_ptr->slist_enabled) || (nominated_entries_count == cache_ptr->slist_len));
+ HDassert(nominated_entries_size == space_needed);
} /* end if */
@@ -567,7 +541,6 @@ done:
} /* H5C_construct_candidate_list__clean_cache() */
-
/*-------------------------------------------------------------------------
* Function: H5C_construct_candidate_list__min_clean
*
@@ -585,7 +558,7 @@ done:
* 3/17/10
*
* Changes: With the slist optimization, the slist is not maintained
- * unless a flush is in progress. Updated sanity checks to
+ * unless a flush is in progress. Updated sanity checks to
* reflect this.
*
* JRM -- 7/27/20
@@ -593,81 +566,74 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5C_construct_candidate_list__min_clean(H5C_t * cache_ptr)
+H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr)
{
- size_t space_needed = 0;
- herr_t ret_value = SUCCEED; /* Return value */
+ size_t space_needed = 0;
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
/* compute the number of bytes (if any) that must be flushed to get the
* cache back within its min clean constraints.
*/
- if(cache_ptr->max_cache_size > cache_ptr->index_size) {
+ if (cache_ptr->max_cache_size > cache_ptr->index_size) {
- if ( ( (cache_ptr->max_cache_size - cache_ptr->index_size) +
- cache_ptr->cLRU_list_size) >= cache_ptr->min_clean_size ) {
-
- space_needed = 0;
+ if (((cache_ptr->max_cache_size - cache_ptr->index_size) + cache_ptr->cLRU_list_size) >=
+ cache_ptr->min_clean_size) {
- } else {
+ space_needed = 0;
+ }
+ else {
space_needed = cache_ptr->min_clean_size -
- ((cache_ptr->max_cache_size - cache_ptr->index_size) +
- cache_ptr->cLRU_list_size);
+ ((cache_ptr->max_cache_size - cache_ptr->index_size) + cache_ptr->cLRU_list_size);
}
} /* end if */
else {
- if(cache_ptr->min_clean_size <= cache_ptr->cLRU_list_size) {
+ if (cache_ptr->min_clean_size <= cache_ptr->cLRU_list_size) {
- space_needed = 0;
-
- } else {
+ space_needed = 0;
+ }
+ else {
- space_needed = cache_ptr->min_clean_size -
- cache_ptr->cLRU_list_size;
+ space_needed = cache_ptr->min_clean_size - cache_ptr->cLRU_list_size;
}
} /* end else */
- if(space_needed > 0) { /* we have work to do */
+ if (space_needed > 0) { /* we have work to do */
H5C_cache_entry_t *entry_ptr;
- unsigned nominated_entries_count = 0;
- size_t nominated_entries_size = 0;
+ unsigned nominated_entries_count = 0;
+ size_t nominated_entries_size = 0;
- HDassert( ( ! cache_ptr->slist_enabled ) ||
- ( cache_ptr->slist_len > 0 ) );
+ HDassert((!cache_ptr->slist_enabled) || (cache_ptr->slist_len > 0));
/* Scan the dirty LRU list from tail forward and nominate sufficient
* entries to free up the necessary space.
*/
entry_ptr = cache_ptr->dLRU_tail_ptr;
- while ( ( nominated_entries_size < space_needed ) &&
- ( ( ! cache_ptr->slist_enabled ) ||
- ( nominated_entries_count < cache_ptr->slist_len ) ) &&
- ( entry_ptr != NULL ) &&
- ( ! entry_ptr->flush_me_last ) ) {
+ while ((nominated_entries_size < space_needed) &&
+ ((!cache_ptr->slist_enabled) || (nominated_entries_count < cache_ptr->slist_len)) &&
+ (entry_ptr != NULL) && (!entry_ptr->flush_me_last)) {
- haddr_t nominated_addr;
+ haddr_t nominated_addr;
- HDassert( ! (entry_ptr->is_protected) );
- HDassert( ! (entry_ptr->is_read_only) );
- HDassert( entry_ptr->ro_ref_count == 0 );
- HDassert( entry_ptr->is_dirty );
- HDassert( ( ! cache_ptr->slist_enabled ) ||
- ( entry_ptr->in_slist ) );
+ HDassert(!(entry_ptr->is_protected));
+ HDassert(!(entry_ptr->is_read_only));
+ HDassert(entry_ptr->ro_ref_count == 0);
+ HDassert(entry_ptr->is_dirty);
+ HDassert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist));
nominated_addr = entry_ptr->addr;
- if(H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
+ if (H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5AC_add_candidate() failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed")
nominated_entries_size += entry_ptr->size;
nominated_entries_count++;
@@ -675,17 +641,15 @@ H5C_construct_candidate_list__min_clean(H5C_t * cache_ptr)
} /* end while */
- HDassert( ( ! cache_ptr->slist_enabled ) ||
- ( nominated_entries_count <= cache_ptr->slist_len ) );
- HDassert( nominated_entries_size <= cache_ptr->dirty_index_size );
- HDassert( nominated_entries_size >= space_needed );
+ HDassert((!cache_ptr->slist_enabled) || (nominated_entries_count <= cache_ptr->slist_len));
+ HDassert(nominated_entries_size <= cache_ptr->dirty_index_size);
+ HDassert(nominated_entries_size >= space_needed);
} /* end if */
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_construct_candidate_list__min_clean() */
-
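(Aside, not part of the commit: the space_needed computation reformatted above reduces to "clean space on hand is the unused cache capacity plus the clean LRU size; the deficit against min_clean_size, clamped at zero, is what must be flushed". A small sketch with hypothetical parameter names standing in for the cache fields:)

#include <stddef.h>
#include <stdio.h>

static size_t
min_clean_space_needed(size_t max_cache_size, size_t index_size,
                       size_t clean_lru_size, size_t min_clean_size)
{
    size_t clean_available = clean_lru_size;

    /* Unused capacity also counts toward the clean space on hand */
    if (max_cache_size > index_size)
        clean_available += max_cache_size - index_size;

    return (clean_available >= min_clean_size) ? 0 : (min_clean_size - clean_available);
}

int
main(void)
{
    /* 2 MiB cache, 1.75 MiB in use, 128 KiB clean LRU, 512 KiB min clean */
    size_t needed = min_clean_space_needed(2048 * 1024, 1792 * 1024, 128 * 1024, 512 * 1024);

    printf("must flush %zu bytes\n", needed); /* 512K - (256K + 128K) = 128K */
    return 0;
}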
/*-------------------------------------------------------------------------
*
* Function: H5C_mark_entries_as_clean
@@ -718,84 +682,77 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5C_mark_entries_as_clean(H5F_t * f,
- unsigned ce_array_len,
- haddr_t * ce_array_ptr)
+H5C_mark_entries_as_clean(H5F_t *f, unsigned ce_array_len, haddr_t *ce_array_ptr)
{
- H5C_t * cache_ptr;
- unsigned entries_cleared;
- unsigned pinned_entries_cleared;
- hbool_t progress;
- unsigned entries_examined;
- unsigned initial_list_len;
- haddr_t addr;
- unsigned pinned_entries_marked = 0;
+ H5C_t * cache_ptr;
+ unsigned entries_cleared;
+ unsigned pinned_entries_cleared;
+ hbool_t progress;
+ unsigned entries_examined;
+ unsigned initial_list_len;
+ haddr_t addr;
+ unsigned pinned_entries_marked = 0;
#if H5C_DO_SANITY_CHECKS
- unsigned protected_entries_marked = 0;
- unsigned other_entries_marked = 0;
- haddr_t last_addr;
+ unsigned protected_entries_marked = 0;
+ unsigned other_entries_marked = 0;
+ haddr_t last_addr;
#endif /* H5C_DO_SANITY_CHECKS */
- H5C_cache_entry_t * clear_ptr = NULL;
- H5C_cache_entry_t * entry_ptr = NULL;
- unsigned u;
- herr_t ret_value = SUCCEED; /* Return value */
+ H5C_cache_entry_t *clear_ptr = NULL;
+ H5C_cache_entry_t *entry_ptr = NULL;
+ unsigned u;
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- HDassert( f );
- HDassert( f->shared );
+ HDassert(f);
+ HDassert(f->shared);
cache_ptr = f->shared->cache;
- HDassert( cache_ptr );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert( ce_array_len > 0 );
- HDassert( ce_array_ptr != NULL );
+ HDassert(ce_array_len > 0);
+ HDassert(ce_array_ptr != NULL);
#if H5C_DO_EXTREME_SANITY_CHECKS
- if(H5C_validate_protected_entry_list(cache_ptr) < 0 ||
- H5C_validate_pinned_entry_list(cache_ptr) < 0 ||
- H5C_validate_lru_list(cache_ptr) < 0)
+ if (H5C_validate_protected_entry_list(cache_ptr) < 0 || H5C_validate_pinned_entry_list(cache_ptr) < 0 ||
+ H5C_validate_lru_list(cache_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
- for(u = 0; u < ce_array_len; u++) {
+ for (u = 0; u < ce_array_len; u++) {
addr = ce_array_ptr[u];
#if H5C_DO_SANITY_CHECKS
- if(u == 0)
+ if (u == 0)
last_addr = addr;
else {
- if(last_addr == addr)
+ if (last_addr == addr)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Duplicate entry in cleaned list")
- if(last_addr > addr)
+ if (last_addr > addr)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "cleaned list not sorted")
} /* end else */
#if H5C_DO_EXTREME_SANITY_CHECKS
- if(H5C_validate_protected_entry_list(cache_ptr) < 0
- || H5C_validate_pinned_entry_list(cache_ptr) < 0
- || H5C_validate_lru_list(cache_ptr) < 0)
+ if (H5C_validate_protected_entry_list(cache_ptr) < 0 ||
+ H5C_validate_pinned_entry_list(cache_ptr) < 0 || H5C_validate_lru_list(cache_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed in for loop")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
#endif /* H5C_DO_SANITY_CHECKS */
- HDassert( H5F_addr_defined(addr) );
+ HDassert(H5F_addr_defined(addr));
H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
- if(entry_ptr == NULL) {
+ if (entry_ptr == NULL) {
#if H5C_DO_SANITY_CHECKS
- HDfprintf(stdout,
- "H5C_mark_entries_as_clean: entry[%u] = %" PRIuHADDR
- " not in cache.\n", u, addr);
+ HDfprintf(stdout, "H5C_mark_entries_as_clean: entry[%u] = %" PRIuHADDR " not in cache.\n", u,
+ addr);
#endif /* H5C_DO_SANITY_CHECKS */
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry not in cache?!?!?")
} /* end if */
- else if(!entry_ptr->is_dirty) {
+ else if (!entry_ptr->is_dirty) {
#if H5C_DO_SANITY_CHECKS
- HDfprintf(stdout,
- "H5C_mark_entries_as_clean: entry %" PRIuHADDR
- " is not dirty!?!\n", addr);
+ HDfprintf(stdout, "H5C_mark_entries_as_clean: entry %" PRIuHADDR " is not dirty!?!\n", addr);
#endif /* H5C_DO_SANITY_CHECKS */
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry not dirty?!?!?")
} /* end else-if */
@@ -807,16 +764,16 @@ H5C_mark_entries_as_clean(H5F_t * f,
/* Make sure first that we clear the collective flag from
it so it can be cleared */
- if(TRUE == entry_ptr->coll_access) {
+ if (TRUE == entry_ptr->coll_access) {
entry_ptr->coll_access = FALSE;
H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
} /* end if */
entry_ptr->clear_on_unprotect = TRUE;
- if(entry_ptr->is_pinned)
+ if (entry_ptr->is_pinned)
pinned_entries_marked++;
#if H5C_DO_SANITY_CHECKS
- else if(entry_ptr->is_protected)
+ else if (entry_ptr->is_protected)
protected_entries_marked++;
else
other_entries_marked++;
@@ -849,20 +806,20 @@ H5C_mark_entries_as_clean(H5F_t * f,
* point.
* JRM -- 4/7/15
*/
- entries_cleared = 0;
+ entries_cleared = 0;
entries_examined = 0;
initial_list_len = cache_ptr->LRU_list_len;
- entry_ptr = cache_ptr->LRU_tail_ptr;
- while(entry_ptr != NULL && entries_examined <= initial_list_len &&
- entries_cleared < ce_array_len) {
- if(entry_ptr->clear_on_unprotect) {
+ entry_ptr = cache_ptr->LRU_tail_ptr;
+ while (entry_ptr != NULL && entries_examined <= initial_list_len && entries_cleared < ce_array_len) {
+ if (entry_ptr->clear_on_unprotect) {
entry_ptr->clear_on_unprotect = FALSE;
- clear_ptr = entry_ptr;
- entry_ptr = entry_ptr->prev;
+ clear_ptr = entry_ptr;
+ entry_ptr = entry_ptr->prev;
entries_cleared++;
- if(H5C__flush_single_entry(f, clear_ptr,
- (H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG | H5C__UPDATE_PAGE_BUFFER_FLAG)) < 0)
+ if (H5C__flush_single_entry(f, clear_ptr,
+ (H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG |
+ H5C__UPDATE_PAGE_BUFFER_FLAG)) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear entry")
} /* end if */
else
@@ -871,69 +828,66 @@ H5C_mark_entries_as_clean(H5F_t * f,
} /* end while */
#if H5C_DO_SANITY_CHECKS
- HDassert( entries_cleared == other_entries_marked );
+ HDassert(entries_cleared == other_entries_marked);
#endif /* H5C_DO_SANITY_CHECKS */
/* It is also possible that some of the cleared entries are on the
* pinned list. Must scan that also.
*/
pinned_entries_cleared = 0;
- progress = TRUE;
- while((pinned_entries_cleared < pinned_entries_marked) && progress) {
- progress = FALSE;
+ progress = TRUE;
+ while ((pinned_entries_cleared < pinned_entries_marked) && progress) {
+ progress = FALSE;
entry_ptr = cache_ptr->pel_head_ptr;
- while(entry_ptr != NULL) {
- if(entry_ptr->clear_on_unprotect && entry_ptr->flush_dep_ndirty_children == 0) {
+ while (entry_ptr != NULL) {
+ if (entry_ptr->clear_on_unprotect && entry_ptr->flush_dep_ndirty_children == 0) {
entry_ptr->clear_on_unprotect = FALSE;
- clear_ptr = entry_ptr;
- entry_ptr = entry_ptr->next;
+ clear_ptr = entry_ptr;
+ entry_ptr = entry_ptr->next;
entries_cleared++;
pinned_entries_cleared++;
progress = TRUE;
- if(H5C__flush_single_entry(f, clear_ptr,
- (H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG | H5C__UPDATE_PAGE_BUFFER_FLAG)) < 0)
+ if (H5C__flush_single_entry(f, clear_ptr,
+ (H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG |
+ H5C__UPDATE_PAGE_BUFFER_FLAG)) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear entry")
} /* end if */
else
entry_ptr = entry_ptr->next;
- } /* end while */
- } /* end while */
+ } /* end while */
+ } /* end while */
#if H5C_DO_SANITY_CHECKS
- HDassert( entries_cleared == pinned_entries_marked + other_entries_marked );
- HDassert( entries_cleared + protected_entries_marked == ce_array_len );
+ HDassert(entries_cleared == pinned_entries_marked + other_entries_marked);
+ HDassert(entries_cleared + protected_entries_marked == ce_array_len);
#endif /* H5C_DO_SANITY_CHECKS */
- HDassert( ( entries_cleared == ce_array_len ) ||
- ( (ce_array_len - entries_cleared) <= cache_ptr->pl_len ) );
+ HDassert((entries_cleared == ce_array_len) || ((ce_array_len - entries_cleared) <= cache_ptr->pl_len));
#if H5C_DO_SANITY_CHECKS
- u = 0;
+ u = 0;
entry_ptr = cache_ptr->pl_head_ptr;
- while ( entry_ptr != NULL )
- {
- if ( entry_ptr->clear_on_unprotect ) {
+ while (entry_ptr != NULL) {
+ if (entry_ptr->clear_on_unprotect) {
u++;
}
entry_ptr = entry_ptr->next;
}
- HDassert( (entries_cleared + u) == ce_array_len );
+ HDassert((entries_cleared + u) == ce_array_len);
#endif /* H5C_DO_SANITY_CHECKS */
done:
#if H5C_DO_EXTREME_SANITY_CHECKS
- if(H5C_validate_protected_entry_list(cache_ptr) < 0
- || H5C_validate_pinned_entry_list(cache_ptr) < 0
- || H5C_validate_lru_list(cache_ptr) < 0)
+ if (H5C_validate_protected_entry_list(cache_ptr) < 0 || H5C_validate_pinned_entry_list(cache_ptr) < 0 ||
+ H5C_validate_lru_list(cache_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_mark_entries_as_clean() */
-
/*-------------------------------------------------------------------------
*
* Function: H5C_clear_coll_entries
@@ -951,15 +905,15 @@ done:
herr_t
H5C_clear_coll_entries(H5C_t *cache_ptr, hbool_t partial)
{
- uint32_t clear_cnt;
- H5C_cache_entry_t * entry_ptr = NULL;
- herr_t ret_value = SUCCEED;
+ uint32_t clear_cnt;
+ H5C_cache_entry_t *entry_ptr = NULL;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_NOAPI_NOINIT
entry_ptr = cache_ptr->coll_tail_ptr;
clear_cnt = (partial ? cache_ptr->coll_list_len / 2 : cache_ptr->coll_list_len);
- while(entry_ptr && clear_cnt > 0) {
+ while (entry_ptr && clear_cnt > 0) {
H5C_cache_entry_t *prev_ptr = entry_ptr->coll_prev;
/* Sanity check */
@@ -980,7 +934,6 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_clear_coll_entries */
-
/*-------------------------------------------------------------------------
*
* Function: H5C__collective_write
@@ -997,21 +950,21 @@ done:
static herr_t
H5C__collective_write(H5F_t *f)
{
- H5AC_t *cache_ptr;
- H5FD_mpio_xfer_t orig_xfer_mode = H5FD_MPIO_COLLECTIVE;
- void *base_buf;
- int count;
- int *length_array = NULL;
- MPI_Aint *buf_array = NULL;
- MPI_Aint *offset_array = NULL;
- MPI_Datatype btype;
- hbool_t btype_created = FALSE;
- MPI_Datatype ftype;
- hbool_t ftype_created = FALSE;
- int mpi_code;
- char unused = 0; /* Unused, except for non-NULL pointer value */
- size_t buf_count;
- herr_t ret_value = SUCCEED;
+ H5AC_t * cache_ptr;
+ H5FD_mpio_xfer_t orig_xfer_mode = H5FD_MPIO_COLLECTIVE;
+ void * base_buf;
+ int count;
+ int * length_array = NULL;
+ MPI_Aint * buf_array = NULL;
+ MPI_Aint * offset_array = NULL;
+ MPI_Datatype btype;
+ hbool_t btype_created = FALSE;
+ MPI_Datatype ftype;
+ hbool_t ftype_created = FALSE;
+ int mpi_code;
+ char unused = 0; /* Unused, except for non-NULL pointer value */
+ size_t buf_count;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_STATIC
@@ -1022,49 +975,52 @@ H5C__collective_write(H5F_t *f)
HDassert(cache_ptr->coll_write_list != NULL);
/* Get original transfer mode */
- if(H5CX_get_io_xfer_mode(&orig_xfer_mode) < 0)
+ if (H5CX_get_io_xfer_mode(&orig_xfer_mode) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't get MPI-I/O transfer mode")
/* Set transfer mode */
- if(H5CX_set_io_xfer_mode(H5FD_MPIO_COLLECTIVE) < 0)
+ if (H5CX_set_io_xfer_mode(H5FD_MPIO_COLLECTIVE) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "can't set MPI-I/O transfer mode")
/* Get number of entries in collective write list */
count = (int)H5SL_count(cache_ptr->coll_write_list);
- if(count > 0) {
- H5SL_node_t *node;
- H5C_cache_entry_t *entry_ptr;
- int i;
+ if (count > 0) {
+ H5SL_node_t * node;
+ H5C_cache_entry_t *entry_ptr;
+ int i;
/* Allocate arrays */
- if(NULL == (length_array = (int *)H5MM_malloc((size_t)count * sizeof(int))) )
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for collective write table length array")
- if(NULL == (buf_array = (MPI_Aint *)H5MM_malloc((size_t)count * sizeof(MPI_Aint))) )
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for collective buf table length array")
- if(NULL == (offset_array = (MPI_Aint *)H5MM_malloc((size_t)count * sizeof(MPI_Aint))) )
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for collective offset table length array")
+ if (NULL == (length_array = (int *)H5MM_malloc((size_t)count * sizeof(int))))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for collective write table length array")
+ if (NULL == (buf_array = (MPI_Aint *)H5MM_malloc((size_t)count * sizeof(MPI_Aint))))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for collective buf table length array")
+ if (NULL == (offset_array = (MPI_Aint *)H5MM_malloc((size_t)count * sizeof(MPI_Aint))))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for collective offset table length array")
/* Fill arrays */
node = H5SL_first(cache_ptr->coll_write_list);
HDassert(node);
- if(NULL == (entry_ptr = (H5C_cache_entry_t *)H5SL_item(node)))
+ if (NULL == (entry_ptr = (H5C_cache_entry_t *)H5SL_item(node)))
HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, "can't retrieve skip list item")
/* Set up initial array position & buffer base address */
length_array[0] = (int)entry_ptr->size;
- base_buf = entry_ptr->image_ptr;
- buf_array[0] = (MPI_Aint)0;
+ base_buf = entry_ptr->image_ptr;
+ buf_array[0] = (MPI_Aint)0;
offset_array[0] = (MPI_Aint)entry_ptr->addr;
node = H5SL_next(node);
- i = 1;
- while(node) {
- if(NULL == (entry_ptr = (H5C_cache_entry_t *)H5SL_item(node)))
+ i = 1;
+ while (node) {
+ if (NULL == (entry_ptr = (H5C_cache_entry_t *)H5SL_item(node)))
HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, "can't retrieve skip list item")
/* Set up array position */
length_array[i] = (int)entry_ptr->size;
- buf_array[i] = (MPI_Aint)entry_ptr->image_ptr - (MPI_Aint)base_buf;
+ buf_array[i] = (MPI_Aint)entry_ptr->image_ptr - (MPI_Aint)base_buf;
offset_array[i] = (MPI_Aint)entry_ptr->addr;
/* Advance to next node & array location */
@@ -1073,17 +1029,19 @@ H5C__collective_write(H5F_t *f)
} /* end while */
/* Create memory MPI type */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed(count, length_array, buf_array, MPI_BYTE, &btype)))
+ if (MPI_SUCCESS !=
+ (mpi_code = MPI_Type_create_hindexed(count, length_array, buf_array, MPI_BYTE, &btype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
btype_created = TRUE;
- if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&btype)))
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(&btype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
/* Create file MPI type */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed(count, length_array, offset_array, MPI_BYTE, &ftype)))
+ if (MPI_SUCCESS !=
+ (mpi_code = MPI_Type_create_hindexed(count, length_array, offset_array, MPI_BYTE, &ftype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
ftype_created = TRUE;
- if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&ftype)))
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(&ftype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
/* MPI count to write */
@@ -1102,34 +1060,33 @@ H5C__collective_write(H5F_t *f)
} /* end else */
/* Pass buf type, file type to the file driver */
- if(H5CX_set_mpi_coll_datatypes(btype, ftype) < 0)
+ if (H5CX_set_mpi_coll_datatypes(btype, ftype) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "can't set MPI-I/O properties")
/* Write data */
- if(H5F_block_write(f, H5FD_MEM_DEFAULT, (haddr_t)0, buf_count, base_buf) < 0)
+ if (H5F_block_write(f, H5FD_MEM_DEFAULT, (haddr_t)0, buf_count, base_buf) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_WRITEERROR, FAIL, "unable to write entries collectively")
done:
/* Free arrays */
length_array = (int *)H5MM_xfree(length_array);
- buf_array = (MPI_Aint *)H5MM_xfree(buf_array);
+ buf_array = (MPI_Aint *)H5MM_xfree(buf_array);
offset_array = (MPI_Aint *)H5MM_xfree(offset_array);
/* Free MPI Types */
- if(btype_created && MPI_SUCCESS != (mpi_code = MPI_Type_free(&btype)))
+ if (btype_created && MPI_SUCCESS != (mpi_code = MPI_Type_free(&btype)))
HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
- if(ftype_created && MPI_SUCCESS != (mpi_code = MPI_Type_free(&ftype)))
+ if (ftype_created && MPI_SUCCESS != (mpi_code = MPI_Type_free(&ftype)))
HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
/* Reset transfer mode in API context, if changed */
- if(orig_xfer_mode != H5FD_MPIO_COLLECTIVE)
- if(H5CX_set_io_xfer_mode(orig_xfer_mode) < 0)
+ if (orig_xfer_mode != H5FD_MPIO_COLLECTIVE)
+ if (H5CX_set_io_xfer_mode(orig_xfer_mode) < 0)
HDONE_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "can't set MPI-I/O transfer mode")
FUNC_LEAVE_NOAPI(ret_value);
} /* end H5C__collective_write() */
-
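(Aside, not part of the commit: the H5C__collective_write() hunks above reformat code that describes each deferred metadata entry as one block in a pair of MPI_Type_create_hindexed() datatypes, one for memory offsets and one for file offsets, and then issues a single collective write. The standalone sketch below shows the same datatype pattern with raw MPI-IO; the file name, offsets, and buffers are made up for illustration and error handling is omitted.)

#include <mpi.h>
#include <string.h>

int
main(int argc, char **argv)
{
    char         buf_a[16] = "entry A payload";
    char         buf_b[32] = "entry B payload, a bit longer";
    int          lengths[2];
    MPI_Aint     mem_displs[2], file_displs[2];
    MPI_Datatype mtype, ftype;
    MPI_File     fh;

    MPI_Init(&argc, &argv);

    /* One block per "cache entry": length, memory offset, file offset */
    lengths[0] = (int)sizeof(buf_a);
    lengths[1] = (int)sizeof(buf_b);
    MPI_Get_address(buf_a, &mem_displs[0]);
    MPI_Get_address(buf_b, &mem_displs[1]);
    mem_displs[1] -= mem_displs[0]; /* memory offsets relative to base buffer */
    mem_displs[0]  = 0;
    file_displs[0] = 0;             /* hypothetical file addresses */
    file_displs[1] = 4096;

    MPI_Type_create_hindexed(2, lengths, mem_displs, MPI_BYTE, &mtype);
    MPI_Type_commit(&mtype);
    MPI_Type_create_hindexed(2, lengths, file_displs, MPI_BYTE, &ftype);
    MPI_Type_commit(&ftype);

    /* Single collective write covering both entries */
    MPI_File_open(MPI_COMM_WORLD, "scratch.bin",
                  MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
    MPI_File_set_view(fh, 0, MPI_BYTE, ftype, "native", MPI_INFO_NULL);
    MPI_File_write_all(fh, buf_a, 1, mtype, MPI_STATUS_IGNORE);
    MPI_File_close(&fh);

    MPI_Type_free(&mtype);
    MPI_Type_free(&ftype);
    MPI_Finalize();
    return 0;
}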
/*-------------------------------------------------------------------------
* Function: H5C__flush_candidate_entries
*
@@ -1170,20 +1127,20 @@ done:
*/
static herr_t
H5C__flush_candidate_entries(H5F_t *f, unsigned entries_to_flush[H5C_RING_NTYPES],
- unsigned entries_to_clear[H5C_RING_NTYPES])
+ unsigned entries_to_clear[H5C_RING_NTYPES])
{
#if H5C_DO_SANITY_CHECKS
- int i;
- uint32_t index_len = 0;
- size_t index_size = (size_t)0;
- size_t clean_index_size = (size_t)0;
- size_t dirty_index_size = (size_t)0;
- size_t slist_size = (size_t)0;
- uint32_t slist_len = 0;
+ int i;
+ uint32_t index_len = 0;
+ size_t index_size = (size_t)0;
+ size_t clean_index_size = (size_t)0;
+ size_t dirty_index_size = (size_t)0;
+ size_t slist_size = (size_t)0;
+ uint32_t slist_len = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- H5C_ring_t ring;
- H5C_t * cache_ptr;
- herr_t ret_value = SUCCEED;
+ H5C_ring_t ring;
+ H5C_t * cache_ptr;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_STATIC
@@ -1207,7 +1164,7 @@ H5C__flush_candidate_entries(H5F_t *f, unsigned entries_to_flush[H5C_RING_NTYPES
HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- for(i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
+ for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
index_len += cache_ptr->index_ring_len[i];
index_size += cache_ptr->index_ring_size[i];
clean_index_size += cache_ptr->clean_index_ring_size[i];
@@ -1226,9 +1183,8 @@ H5C__flush_candidate_entries(H5F_t *f, unsigned entries_to_flush[H5C_RING_NTYPES
#endif /* H5C_DO_SANITY_CHECKS */
#if H5C_DO_EXTREME_SANITY_CHECKS
- if(H5C_validate_protected_entry_list(cache_ptr) < 0
- || H5C_validate_pinned_entry_list(cache_ptr) < 0
- || H5C_validate_lru_list(cache_ptr) < 0)
+ if (H5C_validate_protected_entry_list(cache_ptr) < 0 || H5C_validate_pinned_entry_list(cache_ptr) < 0 ||
+ H5C_validate_lru_list(cache_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -1238,12 +1194,12 @@ H5C__flush_candidate_entries(H5F_t *f, unsigned entries_to_flush[H5C_RING_NTYPES
* working inward.
*/
ring = H5C_RING_USER;
- while(ring < H5C_RING_NTYPES) {
- if(H5C__flush_candidates_in_ring(f, ring, entries_to_flush[ring], entries_to_clear[ring]) < 0)
+ while (ring < H5C_RING_NTYPES) {
+ if (H5C__flush_candidates_in_ring(f, ring, entries_to_flush[ring], entries_to_clear[ring]) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush candidates in ring failed")
ring++;
- } /* end while */
+ } /* end while */
done:
cache_ptr->flush_in_progress = FALSE;
@@ -1251,7 +1207,6 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__flush_candidate_entries() */
-
/*-------------------------------------------------------------------------
* Function: H5C__flush_candidates_in_ring
*
@@ -1289,25 +1244,23 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring,
- unsigned entries_to_flush, unsigned entries_to_clear)
+H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned entries_to_flush, unsigned entries_to_clear)
{
- H5C_t * cache_ptr;
- hbool_t progress;
- hbool_t restart_scan = FALSE;
- unsigned entries_flushed = 0;
- unsigned entries_cleared = 0;
+ H5C_t * cache_ptr;
+ hbool_t progress;
+ hbool_t restart_scan = FALSE;
+ unsigned entries_flushed = 0;
+ unsigned entries_cleared = 0;
#if H5C_DO_SANITY_CHECKS
- unsigned init_index_len;
+ unsigned init_index_len;
#endif /* H5C_DO_SANITY_CHECKS */
- unsigned clear_flags = H5C__FLUSH_CLEAR_ONLY_FLAG |
- H5C__GENERATE_IMAGE_FLAG |
- H5C__UPDATE_PAGE_BUFFER_FLAG;
- unsigned flush_flags = H5C__NO_FLAGS_SET;
- unsigned op_flags;
+ unsigned clear_flags =
+ H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG | H5C__UPDATE_PAGE_BUFFER_FLAG;
+ unsigned flush_flags = H5C__NO_FLAGS_SET;
+ unsigned op_flags;
H5C_cache_entry_t *op_ptr;
H5C_cache_entry_t *entry_ptr;
- herr_t ret_value = SUCCEED;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_STATIC
@@ -1322,9 +1275,8 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring,
HDassert(ring < H5C_RING_NTYPES);
#if H5C_DO_EXTREME_SANITY_CHECKS
- if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
- (H5C_validate_lru_list(cache_ptr) < 0))
+ if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -1343,27 +1295,27 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring,
* It is possible that this will change -- hence the assertion.
*/
restart_scan = FALSE;
- entry_ptr = cache_ptr->LRU_tail_ptr;
- while(((entries_flushed < entries_to_flush) || (entries_cleared < entries_to_clear))
- && (entry_ptr != NULL)) {
- hbool_t prev_is_dirty = FALSE;
+ entry_ptr = cache_ptr->LRU_tail_ptr;
+ while (((entries_flushed < entries_to_flush) || (entries_cleared < entries_to_clear)) &&
+ (entry_ptr != NULL)) {
+ hbool_t prev_is_dirty = FALSE;
H5C_cache_entry_t *next_ptr;
/* Entries in the LRU must not have flush dependency children */
HDassert(entry_ptr->flush_dep_nchildren == 0);
/* Remember dirty state of entry to advance to */
- if(entry_ptr->prev != NULL)
+ if (entry_ptr->prev != NULL)
prev_is_dirty = entry_ptr->prev->is_dirty;
/* If the entry is in the ring */
- if(entry_ptr->ring == ring) {
+ if (entry_ptr->ring == ring) {
/* If this process needs to clear this entry. */
- if(entry_ptr->clear_on_unprotect) {
+ if (entry_ptr->clear_on_unprotect) {
HDassert(entry_ptr->is_dirty);
/* Set entry and flags for operation */
- op_ptr = entry_ptr;
+ op_ptr = entry_ptr;
op_flags = clear_flags;
/* Set next entry appropriately */
@@ -1373,11 +1325,11 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring,
entry_ptr->clear_on_unprotect = FALSE;
entries_cleared++;
} /* end if */
- else if(entry_ptr->flush_immediately) {
+ else if (entry_ptr->flush_immediately) {
HDassert(entry_ptr->is_dirty);
/* Set entry and flags for operation */
- op_ptr = entry_ptr;
+ op_ptr = entry_ptr;
op_flags = flush_flags;
/* Set next entry appropriately */
@@ -1399,7 +1351,7 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring,
entry_ptr = entry_ptr->prev;
/* Check for operation */
- if(op_ptr) {
+ if (op_ptr) {
/* reset entries_removed_counter and
* last_entry_removed_ptr prior to the call to
* H5C__flush_single_entry() so that we can spot
@@ -1418,14 +1370,13 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring,
cache_ptr->entries_removed_counter = 0;
cache_ptr->last_entry_removed_ptr = NULL;
- if(H5C__flush_single_entry(f, op_ptr, op_flags) < 0)
+ if (H5C__flush_single_entry(f, op_ptr, op_flags) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't flush entry")
- if(cache_ptr->entries_removed_counter != 0
- || cache_ptr->last_entry_removed_ptr != NULL)
+ if (cache_ptr->entries_removed_counter != 0 || cache_ptr->last_entry_removed_ptr != NULL)
restart_scan = TRUE;
} /* end if */
- } /* end if */
+ } /* end if */
else {
/* Remember "next" pointer (after advancing entries) */
next_ptr = entry_ptr;
@@ -1435,10 +1386,9 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring,
} /* end else */
/* Check for restarts, etc. */
- if((entry_ptr != NULL) &&
- (restart_scan || (entry_ptr->is_dirty != prev_is_dirty)
- || (entry_ptr->next != next_ptr) || entry_ptr->is_protected
- || entry_ptr->is_pinned)) {
+ if ((entry_ptr != NULL) &&
+ (restart_scan || (entry_ptr->is_dirty != prev_is_dirty) || (entry_ptr->next != next_ptr) ||
+ entry_ptr->is_protected || entry_ptr->is_pinned)) {
/* Something has happened to the LRU -- start over
* from the tail.
@@ -1458,11 +1408,11 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring,
HDassert(FALSE); /* see comment above */
restart_scan = FALSE;
- entry_ptr = cache_ptr->LRU_tail_ptr;
+ entry_ptr = cache_ptr->LRU_tail_ptr;
H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
} /* end if */
- } /* end while */
+ } /* end while */
/* It is also possible that some of the cleared entries are on the
* pinned list. Must scan that also.
@@ -1486,26 +1436,26 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring,
* such changes and cause this function to fail if they are detected.
*/
progress = TRUE;
- while(progress && ((entries_flushed < entries_to_flush) || (entries_cleared < entries_to_clear))) {
- progress = FALSE;
+ while (progress && ((entries_flushed < entries_to_flush) || (entries_cleared < entries_to_clear))) {
+ progress = FALSE;
entry_ptr = cache_ptr->pel_head_ptr;
- while((entry_ptr != NULL) &&
- ((entries_flushed < entries_to_flush) || (entries_cleared < entries_to_clear))) {
+ while ((entry_ptr != NULL) &&
+ ((entries_flushed < entries_to_flush) || (entries_cleared < entries_to_clear))) {
H5C_cache_entry_t *prev_ptr;
- hbool_t next_is_dirty = FALSE;
+ hbool_t next_is_dirty = FALSE;
HDassert(entry_ptr->is_pinned);
/* Remember dirty state of entry to advance to */
- if(entry_ptr->next != NULL)
+ if (entry_ptr->next != NULL)
next_is_dirty = entry_ptr->next->is_dirty;
- if(entry_ptr->ring == ring && entry_ptr->flush_dep_ndirty_children == 0) {
- if(entry_ptr->clear_on_unprotect) {
+ if (entry_ptr->ring == ring && entry_ptr->flush_dep_ndirty_children == 0) {
+ if (entry_ptr->clear_on_unprotect) {
HDassert(entry_ptr->is_dirty);
/* Set entry and flags for operation */
- op_ptr = entry_ptr;
+ op_ptr = entry_ptr;
op_flags = clear_flags;
/* Reset entry flag */
@@ -1513,11 +1463,11 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring,
entries_cleared++;
progress = TRUE;
} /* end if */
- else if(entry_ptr->flush_immediately) {
+ else if (entry_ptr->flush_immediately) {
HDassert(entry_ptr->is_dirty);
/* Set entry and flags for operation */
- op_ptr = entry_ptr;
+ op_ptr = entry_ptr;
op_flags = flush_flags;
/* Reset entry flag */
@@ -1530,7 +1480,7 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring,
op_ptr = NULL;
/* Check for operation */
- if(op_ptr) {
+ if (op_ptr) {
/* reset entries_removed_counter and
* last_entry_removed_ptr prior to the call to
* H5C__flush_single_entry() so that we can spot
@@ -1556,14 +1506,13 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring,
*
* JRM -- 2/9/17
*/
- if(H5C__flush_single_entry(f, op_ptr, op_flags) < 0)
+ if (H5C__flush_single_entry(f, op_ptr, op_flags) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't flush entry")
- if(cache_ptr->entries_removed_counter != 0
- || cache_ptr->last_entry_removed_ptr != NULL)
+ if (cache_ptr->entries_removed_counter != 0 || cache_ptr->last_entry_removed_ptr != NULL)
restart_scan = TRUE;
} /* end if */
- } /* end if */
+ } /* end if */
/* Remember "previous" pointer (after advancing entries) */
prev_ptr = entry_ptr;
@@ -1572,10 +1521,9 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring,
entry_ptr = entry_ptr->next;
/* Check for restarts, etc. */
- if((entry_ptr != NULL) &&
- (restart_scan || (entry_ptr->is_dirty != next_is_dirty)
- || (entry_ptr->prev != prev_ptr) || entry_ptr->is_protected
- || !entry_ptr->is_pinned)) {
+ if ((entry_ptr != NULL) &&
+ (restart_scan || (entry_ptr->is_dirty != next_is_dirty) || (entry_ptr->prev != prev_ptr) ||
+ entry_ptr->is_protected || !entry_ptr->is_pinned)) {
/* Something has happened to the pinned entry list -- start
* over from the head.
*
@@ -1606,22 +1554,22 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring,
* H5C__UPDATE_STATS_FOR_PEL_SCAN_RESTART(cache_ptr)
*/
} /* end if */
- } /* end while ( ( entry_ptr != NULL ) &&
- * ( ( entries_flushed > entries_to_flush ) ||
- * ( entries_cleared > entries_to_clear ) ) )
- */
- } /* end while ( ( ( entries_flushed > entries_to_flush ) ||
- * ( entries_cleared > entries_to_clear ) ) &&
- * ( progress ) )
- */
+ } /* end while ( ( entry_ptr != NULL ) &&
+ * ( ( entries_flushed > entries_to_flush ) ||
+ * ( entries_cleared > entries_to_clear ) ) )
+ */
+ } /* end while ( ( ( entries_flushed > entries_to_flush ) ||
+ * ( entries_cleared > entries_to_clear ) ) &&
+ * ( progress ) )
+ */
#if H5C_DO_SANITY_CHECKS
HDassert(init_index_len == cache_ptr->index_len);
#endif /* H5C_DO_SANITY_CHECKS */
- if(entries_flushed != entries_to_flush || entries_cleared != entries_to_clear) {
+ if (entries_flushed != entries_to_flush || entries_cleared != entries_to_clear) {
entry_ptr = cache_ptr->il_head;
- while(entry_ptr != NULL) {
+ while (entry_ptr != NULL) {
HDassert(!entry_ptr->clear_on_unprotect || (entry_ptr->ring > ring));
HDassert(!entry_ptr->flush_immediately || (entry_ptr->ring > ring));
entry_ptr = entry_ptr->il_next;
@@ -1634,4 +1582,3 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__flush_candidates_in_ring() */
#endif /* H5_HAVE_PARALLEL */
-
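(Aside, not part of the commit: both loops reformatted in H5C__flush_candidates_in_ring() follow the same defensive pattern -- remember where to advance before acting on an entry, then restart the scan from the end of the list if the flush call may have restructured it. A toy standalone version of that pattern; the list and the process_node() stub are hypothetical, not HDF5 code.)

#include <stdbool.h>
#include <stdio.h>

struct node {
    struct node *prev;
    bool         needs_work;
    int          id;
};

/* Stand-in for H5C__flush_single_entry(); returns true if the list
 * might have been restructured as a side effect of the call.
 */
static bool
process_node(struct node *n)
{
    printf("processed node %d\n", n->id);
    return false; /* no restructuring in this toy example */
}

static void
scan_with_restart(struct node *tail)
{
    struct node *cur = tail;

    while (cur != NULL) {
        struct node *prev = cur->prev; /* remember where to go before acting */

        if (cur->needs_work) {
            cur->needs_work = false;
            if (process_node(cur)) {
                cur = tail; /* list may have changed -- restart from the tail */
                continue;
            }
        }
        cur = prev;
    }
}

int
main(void)
{
    struct node a = {NULL, true, 0};
    struct node b = {&a, false, 1};
    struct node c = {&b, true, 2};

    scan_with_restart(&c); /* processes nodes 2 and 0 */
    return 0;
}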