author     Dana Robinson <derobins@hdfgroup.org>  2020-05-26 20:07:43 (GMT)
committer  Dana Robinson <derobins@hdfgroup.org>  2020-05-26 20:07:43 (GMT)
commit     2477b6014582cd24a91d2b1daf0e5c451eda9b3e (patch)
tree       1082eeda8c22c18bcd45cc11e18fb19281da817f /src
parent     7746c3a45a8c920e51ba88da7be14cc075be7f17 (diff)
Removed trailing whitespace from source files.
Diffstat (limited to 'src')
-rw-r--r--  src/H5A.c | 2
-rw-r--r--  src/H5ACdbg.c | 10
-rw-r--r--  src/H5ACmpio.c | 256
-rw-r--r--  src/H5ACpkg.h | 42
-rw-r--r--  src/H5ACprivate.h | 12
-rw-r--r--  src/H5Adense.c | 12
-rw-r--r--  src/H5B2.c | 6
-rw-r--r--  src/H5B2int.c | 12
-rw-r--r--  src/H5B2internal.c | 4
-rw-r--r--  src/H5B2pkg.h | 2
-rw-r--r--  src/H5C.c | 412
-rw-r--r--  src/H5CX.c | 22
-rw-r--r--  src/H5Cdbg.c | 86
-rw-r--r--  src/H5Cepoch.c | 10
-rw-r--r--  src/H5Cimage.c | 538
-rw-r--r--  src/H5Clog.c | 4
-rw-r--r--  src/H5Clog_json.c | 74
-rw-r--r--  src/H5Clog_trace.c | 38
-rw-r--r--  src/H5Cpkg.h | 306
-rw-r--r--  src/H5Cprefetched.c | 42
-rw-r--r--  src/H5Cprivate.h | 556
-rw-r--r--  src/H5Cquery.c | 4
-rw-r--r--  src/H5Ctag.c | 72
-rw-r--r--  src/H5D.c | 18
-rw-r--r--  src/H5Dbtree2.c | 26
-rw-r--r--  src/H5Dcontig.c | 6
-rw-r--r--  src/H5Dfill.c | 2
-rw-r--r--  src/H5Dio.c | 32
-rw-r--r--  src/H5Dmpio.c | 48
-rw-r--r--  src/H5Dnone.c | 8
-rw-r--r--  src/H5Dpublic.h | 4
-rw-r--r--  src/H5Dsingle.c | 6
-rw-r--r--  src/H5EAcache.c | 8
-rw-r--r--  src/H5F.c | 2
-rw-r--r--  src/H5FAcache.c | 12
-rw-r--r--  src/H5FDcore.c | 40
-rw-r--r--  src/H5FDhdfs.h | 2
-rw-r--r--  src/H5FDint.c | 6
-rw-r--r--  src/H5FDlog.c | 12
-rw-r--r--  src/H5FDmpi.c | 4
-rw-r--r--  src/H5FDmulti.c | 10
-rw-r--r--  src/H5FDprivate.h | 2
-rw-r--r--  src/H5FDpublic.h | 12
-rw-r--r--  src/H5FDsec2.c | 18
-rw-r--r--  src/H5FDstdio.c | 10
-rw-r--r--  src/H5FS.c | 2
-rw-r--r--  src/H5FScache.c | 138
-rw-r--r--  src/H5FSint.c | 6
-rw-r--r--  src/H5FSsection.c | 36
-rw-r--r--  src/H5Fdeprec.c | 6
-rw-r--r--  src/H5Fefc.c | 2
-rw-r--r--  src/H5Fint.c | 2
-rw-r--r--  src/H5Fio.c | 2
-rw-r--r--  src/H5Fmpi.c | 4
-rw-r--r--  src/H5Fpkg.h | 6
-rw-r--r--  src/H5Fsuper_cache.c | 16
-rw-r--r--  src/H5G.c | 4
-rw-r--r--  src/H5Gcache.c | 10
-rw-r--r--  src/H5Gcompact.c | 2
-rw-r--r--  src/H5Gint.c | 4
-rw-r--r--  src/H5Gnode.c | 4
-rw-r--r--  src/H5Gpkg.h | 4
-rw-r--r--  src/H5Gprivate.h | 2
-rw-r--r--  src/H5Gtraverse.c | 6
-rw-r--r--  src/H5HFbtree2.c | 4
-rw-r--r--  src/H5HFcache.c | 612
-rw-r--r--  src/H5HFiblock.c | 2
-rw-r--r--  src/H5HFman.c | 8
-rw-r--r--  src/H5HFpkg.h | 8
-rw-r--r--  src/H5HFsection.c | 2
-rw-r--r--  src/H5HFtiny.c | 4
-rw-r--r--  src/H5HGcache.c | 28
-rw-r--r--  src/H5HLcache.c | 50
-rw-r--r--  src/H5HLdblk.c | 2
-rw-r--r--  src/H5HLint.c | 2
-rw-r--r--  src/H5L.c | 4
-rw-r--r--  src/H5MF.c | 324
-rw-r--r--  src/H5MFaggr.c | 6
-rw-r--r--  src/H5MFdbg.c | 4
-rw-r--r--  src/H5MFprivate.h | 6
-rw-r--r--  src/H5MFsection.c | 10
-rw-r--r--  src/H5Oattr.c | 2
-rw-r--r--  src/H5Oattribute.c | 2
-rw-r--r--  src/H5Ocache.c | 58
-rw-r--r--  src/H5Ocache_image.c | 10
-rw-r--r--  src/H5Odtype.c | 2
-rw-r--r--  src/H5Olayout.c | 2
-rw-r--r--  src/H5Oprivate.h | 2
-rw-r--r--  src/H5R.c | 2
-rw-r--r--  src/H5S.c | 6
-rw-r--r--  src/H5SM.c | 2
-rw-r--r--  src/H5SMcache.c | 18
-rw-r--r--  src/H5Shyper.c | 28
-rw-r--r--  src/H5Smpio.c | 66
-rw-r--r--  src/H5Snone.c | 2
-rw-r--r--  src/H5Spoint.c | 6
-rw-r--r--  src/H5Sprivate.h | 2
-rw-r--r--  src/H5Sselect.c | 118
-rw-r--r--  src/H5TS.c | 28
-rw-r--r--  src/H5Torder.c | 10
-rw-r--r--  src/H5Tvlen.c | 2
-rw-r--r--  src/H5UC.c | 2
-rw-r--r--  src/H5UCprivate.h | 2
-rw-r--r--  src/H5VM.c | 2
104 files changed, 2258 insertions, 2258 deletions
diff --git a/src/H5A.c b/src/H5A.c
index 3af0560..1bbbbc1 100644
--- a/src/H5A.c
+++ b/src/H5A.c
@@ -1129,7 +1129,7 @@ H5Arename(hid_t loc_id, const char *old_name, const char *new_name)
/* Avoid thrashing things if the names are the same */
if(HDstrcmp(old_name, new_name)) {
H5G_loc_t loc; /* Object location */
-
+
if(H5G_loc(loc_id, &loc) < 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a location")
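
The H5A.c hunk above sits inside H5Arename's guard that skips the rename when the two names compare equal. A minimal standalone sketch of the same guard pattern; the function names here are hypothetical, not HDF5 API:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for the expensive rename machinery. */
    static int do_rename(const char *old_name, const char *new_name)
    {
        printf("renaming '%s' -> '%s'\n", old_name, new_name);
        return 0;
    }

    /* Same shape as the guard above: only do the work when the
     * names actually differ, avoiding needless cache churn.
     */
    static int rename_if_needed(const char *old_name, const char *new_name)
    {
        if (strcmp(old_name, new_name) != 0)
            return do_rename(old_name, new_name);
        return 0; /* identical names: nothing to do */
    }
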
diff --git a/src/H5ACdbg.c b/src/H5ACdbg.c
index 1235206..b40f8d0 100644
--- a/src/H5ACdbg.c
+++ b/src/H5ACdbg.c
@@ -253,8 +253,8 @@ H5AC_flush_dependency_exists(H5F_t *f, haddr_t parent_addr, haddr_t child_addr,
* type field contains the expected value.
*
* If the specified entry is in cache, *in_cache_ptr is set
- * to TRUE, and *type_ok_ptr is set to TRUE or FALSE depending
- * on whether the entries type field matches the
+ * to TRUE, and *type_ok_ptr is set to TRUE or FALSE depending
+ * on whether the entries type field matches the
* expected_type parameter
*
* If the target entry is not in cache, *in_cache_ptr is
@@ -278,8 +278,8 @@ H5AC_flush_dependency_exists(H5F_t *f, haddr_t parent_addr, haddr_t child_addr,
*/
#ifndef NDEBUG
herr_t
-H5AC_verify_entry_type(const H5F_t *f, haddr_t addr,
- const H5AC_class_t *expected_type, hbool_t *in_cache_ptr,
+H5AC_verify_entry_type(const H5F_t *f, haddr_t addr,
+ const H5AC_class_t *expected_type, hbool_t *in_cache_ptr,
hbool_t *type_ok_ptr)
{
H5C_t * cache_ptr;
@@ -304,7 +304,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5AC_get_serialization_in_progress
*
- * Purpose: Return the current value of
+ * Purpose: Return the current value of
* cache_ptr->serialization_in_progress.
*
* Return: Current value of cache_ptr->serialization_in_progress.
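
The corrected comment above pins down H5AC_verify_entry_type's contract: *in_cache_ptr reports whether the entry is resident, and *type_ok_ptr is meaningful only when it is. A hedged sketch of a function with that contract, using a linear scan and plain C types in place of the cache's hash index and hbool_t/haddr_t:

    #include <stdbool.h>
    #include <stddef.h>

    typedef unsigned long long addr_t;                    /* stand-in for haddr_t */
    typedef struct entry { addr_t addr; int type_id; } entry_t;

    /* Report residency in *in_cache; only when resident, report in
     * *type_ok whether the entry's type matches the expected one.
     * When *in_cache is false, *type_ok is left untouched and must
     * not be read -- exactly the contract described above.
     */
    static void verify_entry_type(const entry_t *entries, size_t n,
                                  addr_t addr, int expected_type,
                                  bool *in_cache, bool *type_ok)
    {
        *in_cache = false;
        for (size_t i = 0; i < n; i++)
            if (entries[i].addr == addr) {
                *in_cache = true;
                *type_ok = (entries[i].type_id == expected_type);
                return;
            }
    }
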
diff --git a/src/H5ACmpio.c b/src/H5ACmpio.c
index b60b933..5e4e6b4 100644
--- a/src/H5ACmpio.c
+++ b/src/H5ACmpio.c
@@ -135,7 +135,7 @@ H5FL_DEFINE_STATIC(H5AC_slist_entry_t);
/*-------------------------------------------------------------------------
* Function: H5AC__set_sync_point_done_callback
*
- * Purpose: Set the value of the sync_point_done callback. This
+ * Purpose: Set the value of the sync_point_done callback. This
* callback is used by the parallel test code to verify
* that the expected writes and only the expected writes
* take place during a sync point.
@@ -204,7 +204,7 @@ H5AC__set_write_done_callback(H5C_t * cache_ptr, void (* write_done)(void))
* Function: H5AC_add_candidate()
*
* Purpose: Add the supplied metadata entry address to the candidate
- * list. Verify that each entry added does not appear in
+ * list. Verify that each entry added does not appear in
* the list prior to its insertion.
*
* This function is intended for used in constructing list
@@ -261,11 +261,11 @@ done:
*
* Purpose: Broadcast the contents of the process 0 candidate entry
* slist. In passing, also remove all entries from said
- * list. As the application of this will be handled by
- * the same functions on all processes, construct and
+ * list. As the application of this will be handled by
+ * the same functions on all processes, construct and
* return a copy of the list in the same format as that
* received by the other processes. Note that if this
- * copy is returned in *haddr_buf_ptr_ptr, the caller
+ * copy is returned in *haddr_buf_ptr_ptr, the caller
* must free it.
*
* This function must only be called by the process with
@@ -317,7 +317,7 @@ H5AC__broadcast_candidate_list(H5AC_t *cache_ptr, unsigned *num_entries_ptr,
unsigned chk_num_entries = 0;
/* convert the candidate list into the format we
- * are used to receiving from process 0, and also load it
+ * are used to receiving from process 0, and also load it
* into a buffer for transmission.
*/
if(H5AC__copy_candidate_list_to_buffer(cache_ptr, &chk_num_entries, &haddr_buf_ptr) < 0)
@@ -331,7 +331,7 @@ H5AC__broadcast_candidate_list(H5AC_t *cache_ptr, unsigned *num_entries_ptr,
HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
} /* end if */
- /* Pass the number of entries and the buffer pointer
+ /* Pass the number of entries and the buffer pointer
* back to the caller. Do this so that we can use the same code
* to apply the candidate list to all the processes.
*/
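
The comments above describe the process-0 side of candidate distribution: pack the list into a buffer, broadcast it, and hand the count and a copy of the buffer back so every rank can run the same apply code. A minimal MPI sketch of that broadcast shape; the function name is hypothetical and unsigned long long stands in for haddr_t:

    #include <mpi.h>
    #include <stdlib.h>

    typedef unsigned long long addr_t;   /* stand-in for haddr_t */

    /* Rank 0 enters with its candidate addresses in *buf_io and the
     * count in *n_io; other ranks enter with *buf_io == NULL.  On
     * return every rank holds an identical malloc'd copy, which the
     * caller must free -- matching the "caller must free it" note.
     */
    static int bcast_candidates(MPI_Comm comm, unsigned *n_io, addr_t **buf_io)
    {
        int rank;
        MPI_Comm_rank(comm, &rank);

        /* broadcast the entry count first... */
        if (MPI_Bcast(n_io, 1, MPI_UNSIGNED, 0, comm) != MPI_SUCCESS)
            return -1;

        if (*n_io > 0) {
            /* ...then the addresses themselves, as raw bytes */
            if (rank != 0 &&
                NULL == (*buf_io = malloc(*n_io * sizeof(addr_t))))
                return -1;
            if (MPI_Bcast(*buf_io, (int)(*n_io * sizeof(addr_t)),
                          MPI_BYTE, 0, comm) != MPI_SUCCESS)
                return -1;
        }
        return 0;
    }
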
@@ -481,10 +481,10 @@ done:
/*-------------------------------------------------------------------------
* Function: H5AC__construct_candidate_list()
*
- * Purpose: In the parallel case when the metadata_write_strategy is
+ * Purpose: In the parallel case when the metadata_write_strategy is
* H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, process 0 uses
- * this function to construct the list of cache entries to
- * be flushed. This list is then propagated to the other
+ * this function to construct the list of cache entries to
+ * be flushed. This list is then propagated to the other
* caches, and then flushed in a distributed fashion.
*
* The sync_point_op parameter is used to determine the extent
@@ -581,22 +581,22 @@ H5AC__copy_candidate_list_to_buffer_cb(void *_item, void H5_ATTR_UNUSED *_key,
* Function: H5AC__copy_candidate_list_to_buffer
*
* Purpose: Allocate buffer(s) and copy the contents of the candidate
- * entry slist into it (them). In passing, remove all
- * entries from the candidate slist. Note that the
+ * entry slist into it (them). In passing, remove all
+ * entries from the candidate slist. Note that the
* candidate slist must not be empty.
*
* If MPI_Offset_buf_ptr_ptr is not NULL, allocate a buffer
* of MPI_Offset, copy the contents of the candidate
- * entry list into it with the appropriate conversions,
- * and return the base address of the buffer in
+ * entry list into it with the appropriate conversions,
+ * and return the base address of the buffer in
* *MPI_Offset_buf_ptr. Note that this is the buffer
- * used by process 0 to transmit the list of entries to
+ * used by process 0 to transmit the list of entries to
* be flushed to all other processes (in this file group).
*
* Similarly, allocate a buffer of haddr_t, load the contents
- * of the candidate list into this buffer, and return its
- * base address in *haddr_buf_ptr_ptr. Note that this
- * latter buffer is constructed unconditionally.
+ * of the candidate list into this buffer, and return its
+ * base address in *haddr_buf_ptr_ptr. Note that this
+ * latter buffer is constructed unconditionally.
*
* In passing, also remove all entries from the candidate
* entry slist.
@@ -635,8 +635,8 @@ H5AC__copy_candidate_list_to_buffer(const H5AC_t *cache_ptr, unsigned *num_entri
num_entries = (unsigned)H5SL_count(aux_ptr->candidate_slist_ptr);
- /* allocate a buffer(s) to store the list of candidate entry
- * base addresses in
+ /* allocate a buffer(s) to store the list of candidate entry
+ * base addresses in
*/
buf_size = sizeof(haddr_t) * num_entries;
if(NULL == (haddr_buf_ptr = (haddr_t *)H5MM_malloc(buf_size)))
@@ -651,7 +651,7 @@ H5AC__copy_candidate_list_to_buffer(const H5AC_t *cache_ptr, unsigned *num_entri
if(H5SL_free(aux_ptr->candidate_slist_ptr, H5AC__copy_candidate_list_to_buffer_cb, &udata) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "Can't build address list for candidate entries")
- /* Pass the number of entries and the buffer pointer
+ /* Pass the number of entries and the buffer pointer
* back to the caller.
*/
*num_entries_ptr = num_entries;
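
The hunk above drains the candidate skip list into a flat haddr_t buffer. A sketch of that pack-and-drain step over a simple singly linked list; the node type and names are invented stand-ins, not the H5SL skip-list API:

    #include <stdlib.h>

    typedef unsigned long long addr_t;                         /* stand-in for haddr_t */
    typedef struct node { addr_t addr; struct node *next; } node_t;

    /* Copy each listed address into a freshly allocated buffer and
     * free the node in passing, leaving the list empty on return --
     * the "remove all entries from the candidate slist" behavior
     * described above.  Returns NULL on allocation failure.
     */
    static addr_t *drain_to_buffer(node_t **head, unsigned n)
    {
        addr_t *buf = malloc(n * sizeof(addr_t));
        if (buf == NULL)
            return NULL;
        for (unsigned u = 0; u < n && *head != NULL; u++) {
            node_t *tmp = *head;
            buf[u] = tmp->addr;
            *head = tmp->next;
            free(tmp);
        }
        return buf;
    }
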
@@ -1133,32 +1133,32 @@ done:
/*-------------------------------------------------------------------------
* Function: H5AC__propagate_and_apply_candidate_list
*
- * Purpose: Prior to the addition of support for multiple metadata
- * write strategies, in PHDF5, only the metadata cache with
- * mpi rank 0 was allowed to write to file. All other
- * metadata caches on processes with rank greater than 0
- * were required to retain dirty entries until they were
+ * Purpose: Prior to the addition of support for multiple metadata
+ * write strategies, in PHDF5, only the metadata cache with
+ * mpi rank 0 was allowed to write to file. All other
+ * metadata caches on processes with rank greater than 0
+ * were required to retain dirty entries until they were
* notified that the entry was clean.
*
- * This constraint is relaxed with the distributed
+ * This constraint is relaxed with the distributed
* metadata write strategy, in which a list of candidate
* metadata cache entries is constructed by the process 0
* cache and then distributed to the caches of all the other
- * processes. Once the listed is distributed, many (if not
- * all) processes writing writing a unique subset of the
- * entries, and marking the remainder clean. The subsets
- * are chosen so that each entry in the list of candidates
- * is written by exactly one cache, and all entries are
+ * processes. Once the listed is distributed, many (if not
+ * all) processes writing writing a unique subset of the
+ * entries, and marking the remainder clean. The subsets
+ * are chosen so that each entry in the list of candidates
+ * is written by exactly one cache, and all entries are
* marked as being clean in all caches.
*
- * While the list of candidate cache entries is prepared
+ * While the list of candidate cache entries is prepared
* elsewhere, this function is the main routine for distributing
- * and applying the list. It must be run simultaniously on
+ * and applying the list. It must be run simultaniously on
* all processes that have the relevant file open. To ensure
- * proper synchronization, there is a barrier at the beginning
+ * proper synchronization, there is a barrier at the beginning
* of this function.
*
- * At present, this function is called under one of two
+ * At present, this function is called under one of two
* circumstances:
*
* 1) Dirty byte creation exceeds some user specified value.
@@ -1169,10 +1169,10 @@ done:
* and therefore the same dirty data creation.
*
* This fact is used to synchronize the caches for purposes
- * of propagating the list of candidate entries, by simply
- * calling this function from all caches whenever some user
- * specified threshold on dirty data is exceeded. (the
- * process 0 cache creates the candidate list just before
+ * of propagating the list of candidate entries, by simply
+ * calling this function from all caches whenever some user
+ * specified threshold on dirty data is exceeded. (the
+ * process 0 cache creates the candidate list just before
* calling this function).
*
* 2) Under direct user control -- this operation must be
@@ -1187,20 +1187,20 @@ done:
*
* For the process with mpi rank 0:
*
- * 1) Load the contents of the candidate list
+ * 1) Load the contents of the candidate list
* (candidate_slist_ptr) into a buffer, and broadcast that
* buffer to all the other caches. Clear the candidate
* list in passing.
*
- * If there is a positive number of candidates, proceed with
+ * If there is a positive number of candidates, proceed with
* the following:
*
* 2) Apply the candidate entry list.
*
* 3) Particpate in a closing barrier.
*
- * 4) Remove from the dirty list (d_slist_ptr) and from the
- * flushed and still clean entries list (c_slist_ptr),
+ * 4) Remove from the dirty list (d_slist_ptr) and from the
+ * flushed and still clean entries list (c_slist_ptr),
* all addresses that appeared in the candidate list, as
* these entries are now clean.
*
@@ -1209,7 +1209,7 @@ done:
*
* 1) Receive the candidate entry list broadcast
*
- * If there is a positive number of candidates, proceed with
+ * If there is a positive number of candidates, proceed with
* the following:
*
* 2) Apply the candidate entry list.
@@ -1266,8 +1266,8 @@ H5AC__propagate_and_apply_candidate_list(H5F_t *f)
if(num_candidates > 0) {
herr_t result;
- /* all processes apply the candidate list.
- * H5C_apply_candidate_list() handles the details of
+ /* all processes apply the candidate list.
+ * H5C_apply_candidate_list() handles the details of
* distributing the writes across the processes.
*/
@@ -1286,7 +1286,7 @@ H5AC__propagate_and_apply_candidate_list(H5F_t *f)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't apply candidate list.")
/* this code exists primarily for the test bed -- it allows us to
- * enforce posix semantics on the server that pretends to be a
+ * enforce posix semantics on the server that pretends to be a
* file system in our parallel tests.
*/
if(aux_ptr->write_done)
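
Once the list is distributed, every rank runs the same deterministic assignment so that each candidate is written exactly once and marked clean everywhere, followed by the closing barrier. A toy sketch of that apply step; the round-robin rule and the printouts are illustrative only:

    #include <mpi.h>
    #include <stdio.h>

    typedef unsigned long long addr_t;   /* stand-in for haddr_t */

    /* Toy deterministic assignment: entry u belongs to rank u % nranks.
     * Any rule works as long as every rank computes the same answer;
     * that is what guarantees each entry is written by exactly one
     * cache, as the purpose comment above explains.
     */
    static void apply_candidates(MPI_Comm comm, const addr_t *cand, unsigned n)
    {
        int rank, nranks;
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &nranks);

        for (unsigned u = 0; u < n; u++) {
            if ((int)(u % (unsigned)nranks) == rank)
                printf("rank %d: write entry at %llu\n", rank, cand[u]);
            else
                printf("rank %d: mark entry at %llu clean\n", rank, cand[u]);
        }

        /* closing barrier: no rank may move on until all writes and
         * clean-markings from this sync point are complete
         */
        MPI_Barrier(comm);
    }
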
@@ -1325,16 +1325,16 @@ done:
* Function: H5AC__propagate_flushed_and_still_clean_entries_list
*
* Purpose: In PHDF5, if the process 0 only metadata write strategy
- * is selected, only the metadata cache with mpi rank 0 is
- * allowed to write to file. All other metadata caches on
- * processes with rank greater than 0 must retain dirty
- * entries until they are notified that the entry is now
+ * is selected, only the metadata cache with mpi rank 0 is
+ * allowed to write to file. All other metadata caches on
+ * processes with rank greater than 0 must retain dirty
+ * entries until they are notified that the entry is now
* clean.
*
- * This function is the main routine for handling this
- * notification procedure. It must be called
- * simultaniously on all processes that have the relevant
- * file open. To this end, it is called only during a
+ * This function is the main routine for handling this
+ * notification procedure. It must be called
+ * simultaniously on all processes that have the relevant
+ * file open. To this end, it is called only during a
* sync point, with a barrier prior to the call.
*
* Note that any metadata entry writes by process 0 will
@@ -1430,7 +1430,7 @@ done:
*
* Purpose: Receive the list of entry addresses from process 0,
* and return it in a buffer pointed to by *haddr_buf_ptr_ptr.
- * Note that the caller must free this buffer if it is
+ * Note that the caller must free this buffer if it is
* returned.
*
* This function must only be called by the process with
@@ -1481,7 +1481,7 @@ H5AC__receive_haddr_list(MPI_Comm mpi_comm, unsigned *num_entries_ptr,
HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
} /* end if */
- /* finally, pass the number of entries and the buffer pointer
+ /* finally, pass the number of entries and the buffer pointer
* back to the caller.
*/
*num_entries_ptr = num_entries;
@@ -1564,7 +1564,7 @@ done:
*
* Purpose: Receive the list of candidate entries from process 0,
* and return it in a buffer pointed to by *haddr_buf_ptr_ptr.
- * Note that the caller must free this buffer if it is
+ * Note that the caller must free this buffer if it is
* returned.
*
* This function must only be called by the process with
@@ -1614,38 +1614,38 @@ done:
* Purpose: Routine for handling the details of running a sync point
* that is triggered by a flush -- which in turn must have been
* triggered by either a flush API call or a file close --
- * when the distributed metadata write strategy is selected.
- *
- * Upon entry, each process generates it own candidate list,
- * being a sorted list of all dirty metadata entries currently
- * in the metadata cache. Note that this list must be idendical
- * across all processes, as all processes see the same stream
- * of dirty metadata coming in, and use the same lists of
- * candidate entries at each sync point. (At first glance, this
+ * when the distributed metadata write strategy is selected.
+ *
+ * Upon entry, each process generates it own candidate list,
+ * being a sorted list of all dirty metadata entries currently
+ * in the metadata cache. Note that this list must be idendical
+ * across all processes, as all processes see the same stream
+ * of dirty metadata coming in, and use the same lists of
+ * candidate entries at each sync point. (At first glance, this
* argument sounds circular, but think of it in the sense of
* a recursive proof).
*
- * If this this list is empty, we are done, and the function
+ * If this this list is empty, we are done, and the function
* returns
*
- * Otherwise, after the sorted list dirty metadata entries is
- * constructed, each process uses the same algorithm to assign
- * each entry on the candidate list to exactly one process for
+ * Otherwise, after the sorted list dirty metadata entries is
+ * constructed, each process uses the same algorithm to assign
+ * each entry on the candidate list to exactly one process for
* flushing.
*
* At this point, all processes participate in a barrier to
* avoid messages from the past/future bugs.
*
- * Each process then flushes the entries assigned to it, and
+ * Each process then flushes the entries assigned to it, and
* marks all other entries on the candidate list as clean.
*
- * Finally, all processes participate in a second barrier to
+ * Finally, all processes participate in a second barrier to
* avoid messages from the past/future bugs.
*
* At the end of this process, process 0 and only process 0
- * must tidy up its lists of dirtied and cleaned entries.
+ * must tidy up its lists of dirtied and cleaned entries.
* These lists are not used in the distributed metadata write
- * strategy, but they must be maintained should we shift
+ * strategy, but they must be maintained should we shift
* to a strategy that uses them.
*
* Return: Success: non-negative
@@ -1678,7 +1678,7 @@ H5AC__rsp__dist_md_write__flush(H5F_t *f)
HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
- /* first construct the candidate list -- initially, this will be in the
+ /* first construct the candidate list -- initially, this will be in the
* form of a skip list. We will convert it later.
*/
if(H5C_construct_candidate_list__clean_cache(cache_ptr) < 0)
@@ -1694,7 +1694,7 @@ H5AC__rsp__dist_md_write__flush(H5F_t *f)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't construct candidate buffer.")
/* Initial sync point barrier
- *
+ *
* When flushing from within the close operation from a file,
* it's possible to skip this barrier (on the second flush of the cache).
*/
@@ -1717,7 +1717,7 @@ H5AC__rsp__dist_md_write__flush(H5F_t *f)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't apply candidate list.")
/* this code exists primarily for the test bed -- it allows us to
- * enforce posix semantics on the server that pretends to be a
+ * enforce posix semantics on the server that pretends to be a
* file system in our parallel tests.
*/
if(aux_ptr->write_done)
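
A key premise in the purpose comment above is that every rank builds an identical candidate list without communicating, because all ranks see the same stream of dirty metadata and sort it by the same key. A small sketch of that determinism; qsort over raw addresses stands in for the real record ordering:

    #include <stdlib.h>

    typedef unsigned long long addr_t;   /* stand-in for haddr_t */

    static int cmp_addr(const void *a, const void *b)
    {
        addr_t x = *(const addr_t *)a, y = *(const addr_t *)b;
        return (x > y) - (x < y);
    }

    /* Each rank sorts its locally accumulated dirty addresses.  Same
     * input stream + same comparison = same list on every rank, the
     * property the comment above justifies "in the sense of a
     * recursive proof".
     */
    static void build_candidate_list(addr_t *dirty_addrs, unsigned n)
    {
        qsort(dirty_addrs, n, sizeof(addr_t), cmp_addr);
    }
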
@@ -1754,45 +1754,45 @@ done:
* Function: H5AC__rsp__dist_md_write__flush_to_min_clean
*
* Purpose: Routine for handling the details of running a sync point
- * triggered by the accumulation of dirty metadata (as
+ * triggered by the accumulation of dirty metadata (as
* opposed to a flush call to the API) when the distributed
* metadata write strategy is selected.
*
* After invocation and initial sanity checking this function
- * first checks to see if evictions are enabled -- if they
+ * first checks to see if evictions are enabled -- if they
* are not, the function does nothing and returns.
*
- * Otherwise, process zero constructs a list of entries to
+ * Otherwise, process zero constructs a list of entries to
* be flushed in order to bring the process zero cache back
- * within its min clean requirement. Note that this list
+ * within its min clean requirement. Note that this list
* (the candidate list) may be empty.
*
* Then, all processes participate in a barrier.
*
- * After the barrier, process 0 broadcasts the number of
- * entries in the candidate list prepared above, and all
+ * After the barrier, process 0 broadcasts the number of
+ * entries in the candidate list prepared above, and all
* other processes receive this number.
*
* If this number is zero, we are done, and the function
* returns without further action.
*
- * Otherwise, process 0 broadcasts the sorted list of
+ * Otherwise, process 0 broadcasts the sorted list of
* candidate entries, and all other processes receive it.
*
- * Then, each process uses the same algorithm to assign
- * each entry on the candidate list to exactly one process
+ * Then, each process uses the same algorithm to assign
+ * each entry on the candidate list to exactly one process
* for flushing.
*
- * Each process then flushes the entries assigned to it, and
+ * Each process then flushes the entries assigned to it, and
* marks all other entries on the candidate list as clean.
*
- * Finally, all processes participate in a second barrier to
+ * Finally, all processes participate in a second barrier to
* avoid messages from the past/future bugs.
*
* At the end of this process, process 0 and only process 0
- * must tidy up its lists of dirtied and cleaned entries.
+ * must tidy up its lists of dirtied and cleaned entries.
* These lists are not used in the distributed metadata write
- * strategy, but they must be maintained should we shift
+ * strategy, but they must be maintained should we shift
* to a strategy that uses them.
*
* Return: Success: non-negative
@@ -1848,25 +1848,25 @@ done:
*
* Purpose: Routine for handling the details of running a sync point
* that is triggered a flush -- which in turn must have been
- * triggered by either a flush API call or a file close --
- * when the process 0 only metadata write strategy is selected.
+ * triggered by either a flush API call or a file close --
+ * when the process 0 only metadata write strategy is selected.
*
* First, all processes participate in a barrier.
*
* Then process zero flushes all dirty entries, and broadcasts
- * they number of clean entries (if any) to all the other
+ * they number of clean entries (if any) to all the other
* caches.
*
* If this number is zero, we are done.
*
- * Otherwise, process 0 broadcasts the list of cleaned
+ * Otherwise, process 0 broadcasts the list of cleaned
* entries, and all other processes which are part of this
* file group receive it, and mark the listed entries as
* clean in their caches.
*
- * Since all processes have the same set of dirty
+ * Since all processes have the same set of dirty
* entries at the beginning of the sync point, and all
- * entries that will be written are written before
+ * entries that will be written are written before
* process zero broadcasts the number of cleaned entries,
* there is no need for a closing barrier.
*
@@ -1898,8 +1898,8 @@ H5AC__rsp__p0_only__flush(H5F_t *f)
HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
- /* To prevent "messages from the future" we must
- * synchronize all processes before we start the flush.
+ /* To prevent "messages from the future" we must
+ * synchronize all processes before we start the flush.
* Hence the following barrier.
*
* However, when flushing from within the close operation from a file,
@@ -1927,7 +1927,7 @@ H5AC__rsp__p0_only__flush(H5F_t *f)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush.")
/* this code exists primarily for the test bed -- it allows us to
- * enforce POSIX semantics on the server that pretends to be a
+ * enforce POSIX semantics on the server that pretends to be a
* file system in our parallel tests.
*/
if(aux_ptr->write_done)
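
For the process-0-only strategy, the comment argues that no closing barrier is needed: all writes happen on rank 0 before it broadcasts the cleaned-entry count, so the broadcast itself orders everything. A skeleton of that flow, with the flush body elided:

    #include <mpi.h>

    /* Skeleton of the process-0-only sync point described above. */
    static void p0_only_flush(MPI_Comm comm, int rank)
    {
        unsigned n_cleaned = 0;

        MPI_Barrier(comm);             /* opening barrier */

        if (rank == 0) {
            /* flush all dirty entries here, counting how many
             * became clean since the last sync point
             */
        }

        /* every write above completes before this broadcast, so the
         * broadcast doubles as the synchronization point
         */
        MPI_Bcast(&n_cleaned, 1, MPI_UNSIGNED, 0, comm);

        if (n_cleaned > 0) {
            /* broadcast the cleaned-entry list (see the earlier
             * bcast_candidates sketch) and, on ranks != 0, mark the
             * listed entries clean in the local cache
             */
        }
    }
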
@@ -1947,32 +1947,32 @@ done:
* Function: H5AC__rsp__p0_only__flush_to_min_clean
*
* Purpose: Routine for handling the details of running a sync point
- * triggered by the accumulation of dirty metadata (as
+ * triggered by the accumulation of dirty metadata (as
* opposed to a flush call to the API) when the process 0
* only metadata write strategy is selected.
*
* After invocation and initial sanity checking this function
- * first checks to see if evictions are enabled -- if they
+ * first checks to see if evictions are enabled -- if they
* are not, the function does nothing and returns.
*
* Otherwise, all processes participate in a barrier.
*
- * After the barrier, if this is process 0, the function
- * causes the cache to flush sufficient entries to get the
- * cache back within its minimum clean fraction, and broadcast
- * the number of entries which have been flushed since
+ * After the barrier, if this is process 0, the function
+ * causes the cache to flush sufficient entries to get the
+ * cache back within its minimum clean fraction, and broadcast
+ * the number of entries which have been flushed since
* the last sync point, and are still clean.
*
* If this number is zero, we are done.
*
- * Otherwise, process 0 broadcasts the list of cleaned
+ * Otherwise, process 0 broadcasts the list of cleaned
* entries, and all other processes which are part of this
* file group receive it, and mark the listed entries as
* clean in their caches.
*
- * Since all processes have the same set of dirty
+ * Since all processes have the same set of dirty
* entries at the beginning of the sync point, and all
- * entries that will be written are written before
+ * entries that will be written are written before
* process zero broadcasts the number of cleaned entries,
* there is no need for a closing barrier.
*
@@ -2028,9 +2028,9 @@ H5AC__rsp__p0_only__flush_to_min_clean(H5F_t *f)
if(0 == aux_ptr->mpi_rank) {
herr_t result;
- /* here, process 0 flushes as many entries as necessary to
+ /* here, process 0 flushes as many entries as necessary to
* comply with the currently specified min clean size.
- * Note that it is quite possible that no entries will be
+ * Note that it is quite possible that no entries will be
* flushed.
*/
@@ -2068,23 +2068,23 @@ done:
* Function: H5AC__run_sync_point
*
* Purpose: Top level routine for managing a sync point between all
- * meta data caches in the parallel case. Since all caches
+ * meta data caches in the parallel case. Since all caches
* see the same sequence of dirty metadata, we simply count
* bytes of dirty metadata, and run a sync point whenever the
* number of dirty bytes of metadata seen since the last
* sync point exceeds a threshold that is common across all
- * processes. We also run sync points in response to
+ * processes. We also run sync points in response to
* HDF5 API calls triggering either a flush or a file close.
*
- * In earlier versions of PHDF5, only the metadata cache with
- * mpi rank 0 was allowed to write to file. All other
+ * In earlier versions of PHDF5, only the metadata cache with
+ * mpi rank 0 was allowed to write to file. All other
* metadata caches on processes with rank greater than 0 were
- * required to retain dirty entries until they were notified
+ * required to retain dirty entries until they were notified
* that the entry is was clean.
*
- * This function was created to make it easier for us to
- * experiment with other options, as it is a single point
- * for the execution of sync points.
+ * This function was created to make it easier for us to
+ * experiment with other options, as it is a single point
+ * for the execution of sync points.
*
* Return: Success: non-negative
*
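
The sync-point trigger described above needs no communication at all: since every rank sees the same dirty metadata stream, a purely local byte counter stays identical across ranks. A sketch of that trigger; the names and the 256 KiB threshold are illustrative, not the library's configured default:

    #include <stdbool.h>
    #include <stddef.h>

    #define DIRTY_BYTES_THRESHOLD (256 * 1024)   /* hypothetical value */

    static size_t dirty_bytes_since_sync = 0;

    /* Called on every rank as dirty metadata is created.  Because all
     * ranks count the same bytes, they all cross the threshold on the
     * same call and enter the collective sync point together.
     */
    static bool note_dirty_metadata(size_t nbytes)
    {
        dirty_bytes_since_sync += nbytes;
        if (dirty_bytes_since_sync >= DIRTY_BYTES_THRESHOLD) {
            dirty_bytes_since_sync = 0;
            return true;   /* caller must now run the sync point */
        }
        return false;
    }
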
@@ -2199,13 +2199,13 @@ done:
*
* Purpose: In the distributed metadata write strategy, not all dirty
* entries are written by process 0 -- thus we must tidy
- * up the dirtied, and flushed and still clean lists
+ * up the dirtied, and flushed and still clean lists
* maintained by process zero after each sync point.
*
* This procedure exists to tend to this issue.
*
* At this point, all entries that process 0 cleared should
- * have been removed from both the dirty and flushed and
+ * have been removed from both the dirty and flushed and
* still clean lists, and entries that process 0 has flushed
* should have been removed from the dirtied list and added
* to the flushed and still clean list.
@@ -2216,7 +2216,7 @@ done:
* them to be used should the metadata write strategy change
* to one that uses these lists.
*
- * Thus for our purposes, all we need to do is remove from
+ * Thus for our purposes, all we need to do is remove from
* the dirtied and flushed and still clean lists all
* references to entries that appear in the candidate list.
*
@@ -2248,11 +2248,11 @@ H5AC__tidy_cache_0_lists(H5AC_t *cache_ptr, unsigned num_candidates,
HDassert(num_candidates > 0);
HDassert(candidates_list_ptr != NULL);
- /* clean up dirtied and flushed and still clean lists by removing
- * all entries on the candidate list. Cleared entries should
- * have been removed from both the dirty and cleaned lists at
- * this point, flushed entries should have been added to the
- * cleaned list. However, for this metadata write strategy,
+ /* clean up dirtied and flushed and still clean lists by removing
+ * all entries on the candidate list. Cleared entries should
+ * have been removed from both the dirty and cleaned lists at
+ * this point, flushed entries should have been added to the
+ * cleaned list. However, for this metadata write strategy,
* we just want to remove all references to the candidate entries.
*/
for(u = 0; u < num_candidates; u++) {
@@ -2262,7 +2262,7 @@ H5AC__tidy_cache_0_lists(H5AC_t *cache_ptr, unsigned num_candidates,
addr = candidates_list_ptr[u];
- /* addr may be either on the dirtied list, or on the flushed
+ /* addr may be either on the dirtied list, or on the flushed
* and still clean list. Remove it.
*/
if(NULL != (d_slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(aux_ptr->d_slist_ptr, (void *)&addr)))
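
The tidy-up step above reduces to removing every candidate address from two skip lists on rank 0. A sketch of that purge over plain linked lists; list_remove is a hypothetical stand-in for H5SL_remove:

    #include <stdlib.h>

    typedef unsigned long long addr_t;                         /* stand-in for haddr_t */
    typedef struct node { addr_t addr; struct node *next; } node_t;

    /* Unlink and free the node with the matching address, if any. */
    static void list_remove(node_t **head, addr_t addr)
    {
        for (node_t **pp = head; *pp != NULL; pp = &(*pp)->next)
            if ((*pp)->addr == addr) {
                node_t *tmp = *pp;
                *pp = tmp->next;
                free(tmp);
                return;
            }
    }

    /* After a distributed sync point every candidate is clean in every
     * cache, so rank 0 simply purges each one from both its dirtied
     * list and its flushed-and-still-clean list, as described above.
     */
    static void tidy_rank0_lists(node_t **dirtied, node_t **cleaned,
                                 const addr_t *cand, unsigned n)
    {
        for (unsigned u = 0; u < n; u++) {
            list_remove(dirtied, cand[u]);
            list_remove(cleaned, cand[u]);
        }
    }
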
diff --git a/src/H5ACpkg.h b/src/H5ACpkg.h
index 8997382..86ff385 100644
--- a/src/H5ACpkg.h
+++ b/src/H5ACpkg.h
@@ -153,23 +153,23 @@ H5FL_EXTERN(H5AC_aux_t);
*
* Update: When the above was written, I planned to allow the process
* 0 metadata cache to write dirty metadata between sync points.
- * However, testing indicated that this allowed occasional
+ * However, testing indicated that this allowed occasional
* messages from the future to reach the caches on other processes.
*
* To resolve this, the code was altered to require that all metadata
* writes take place during sync points -- which solved the problem.
- * Initially all writes were performed by the process 0 cache. This
+ * Initially all writes were performed by the process 0 cache. This
* approach was later replaced with a distributed write approach
- * in which each process writes a subset of the metadata to be
- * written.
- *
- * After thinking on the matter for a while, I arrived at the
- * conclusion that the process 0 cache could be allowed to write
- * dirty metadata between sync points if it restricted itself to
- * entries that had been dirty at the time of the previous sync point.
- *
+ * in which each process writes a subset of the metadata to be
+ * written.
+ *
+ * After thinking on the matter for a while, I arrived at the
+ * conclusion that the process 0 cache could be allowed to write
+ * dirty metadata between sync points if it restricted itself to
+ * entries that had been dirty at the time of the previous sync point.
+ *
* To date, there has been no attempt to implement this optimization.
- * However, should it be attempted, much of the supporting code
+ * However, should it be attempted, much of the supporting code
* should still be around.
*
* JRM -- 1/6/15
@@ -206,14 +206,14 @@ H5FL_EXTERN(H5AC_aux_t);
* broadcast. This field is reset to zero after each such
* broadcast.
*
- * metadata_write_strategy: Integer code indicating how we will be
- * writing the metadata. In the first incarnation of
+ * metadata_write_strategy: Integer code indicating how we will be
+ * writing the metadata. In the first incarnation of
* this code, all writes were done from process 0. This
- * field exists to facilitate experiments with other
+ * field exists to facilitate experiments with other
* strategies.
*
* At present, this field must be set to either
- * H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY or
+ * H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY or
* H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED.
*
* dirty_bytes_propagations: This field only exists when the
@@ -267,7 +267,7 @@ H5FL_EXTERN(H5AC_aux_t);
*
* Things have changed a bit since the following four fields were defined.
* If metadata_write_strategy is H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY,
- * all comments hold as before -- with the caviate that pending further
+ * all comments hold as before -- with the caviate that pending further
* coding, the process 0 metadata cache is forbidden to flush entries outside
* of a sync point.
*
@@ -337,16 +337,16 @@ H5FL_EXTERN(H5AC_aux_t);
* needed.
*
* Note: This field has been extended for use by all processes
- * with the addition of support for the distributed
- * metadata write strategy.
+ * with the addition of support for the distributed
+ * metadata write strategy.
* JRM -- 5/9/10
*
* sync_point_done: In the parallel test bed, it is necessary to verify
* that the expected writes, and only the expected writes,
* have taken place at the end of each sync point.
*
- * The sync_point_done callback allows t_cache to perform
- * this verification. The field is set to NULL when the
+ * The sync_point_done callback allows t_cache to perform
+ * this verification. The field is set to NULL when the
* callback is not needed.
*
* The following field supports the metadata cache image feature.
@@ -402,7 +402,7 @@ typedef struct H5AC_aux_t
void (* write_done)(void);
- void (* sync_point_done)(unsigned num_writes,
+ void (* sync_point_done)(unsigned num_writes,
haddr_t * written_entries_tbl);
unsigned p0_image_len;
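
The struct above ends with the sync_point_done hook used by the parallel test bed. Below is a test-style callback matching the declared shape, void (*)(unsigned, haddr_t *); the body is invented for illustration, the real checks live in t_cache:

    #include <stdio.h>

    typedef unsigned long long addr_t;   /* stand-in for haddr_t */

    /* Matches the sync_point_done signature declared above: receives
     * the number of writes performed during the sync point plus the
     * table of written entry addresses, letting the test bed verify
     * that the expected writes, and only those, took place.
     */
    static void my_sync_point_done(unsigned num_writes,
                                   addr_t *written_entries_tbl)
    {
        printf("sync point wrote %u entries\n", num_writes);
        for (unsigned u = 0; u < num_writes; u++)
            printf("  entry at address %llu\n", written_entries_tbl[u]);
    }
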
diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h
index 7d2d4ad..1416847 100644
--- a/src/H5ACprivate.h
+++ b/src/H5ACprivate.h
@@ -88,18 +88,18 @@ typedef enum {
*
* Hence the following, somewhat odd set of #defines.
*
- * NOTE: test/cache plays games with the f->shared->cache, and thus
- * setting H5AC_DUMP_STATS_ON_CLOSE will generate constant,
- * irrelevant data when run with that test program. See
+ * NOTE: test/cache plays games with the f->shared->cache, and thus
+ * setting H5AC_DUMP_STATS_ON_CLOSE will generate constant,
+ * irrelevant data when run with that test program. See
* comments on setup_cache() / takedown_cache() in test/cache_common.c.
* for details.
*
* If you need to dump stats at file close in test/cache.c,
- * use the dump_stats parameter to takedown_cache(), or call
+ * use the dump_stats parameter to takedown_cache(), or call
* H5C_stats() directly.
* JRM -- 4/12/15
*
- * Added the H5AC_DUMP_IMAGE_STATS_ON_CLOSE #define, which works much
+ * Added the H5AC_DUMP_IMAGE_STATS_ON_CLOSE #define, which works much
* the same way as H5AC_DUMP_STATS_ON_CLOSE. However, the set of stats
* displayed is much smaller, and directed purely at the cache image feature.
*
@@ -418,7 +418,7 @@ H5_DLL herr_t H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr,
H5_DLL herr_t H5AC_validate_config(H5AC_cache_config_t *config_ptr);
/* Cache image routines */
-H5_DLL herr_t H5AC_load_cache_image_on_next_protect(H5F_t *f, haddr_t addr,
+H5_DLL herr_t H5AC_load_cache_image_on_next_protect(H5F_t *f, haddr_t addr,
hsize_t len, hbool_t rw);
H5_DLL herr_t H5AC_validate_cache_image_config(H5AC_cache_image_config_t *config_ptr);
H5_DLL hbool_t H5AC_cache_image_pending(const H5F_t *f);
diff --git a/src/H5Adense.c b/src/H5Adense.c
index c6aa8e0..1caaa41 100644
--- a/src/H5Adense.c
+++ b/src/H5Adense.c
@@ -313,17 +313,17 @@ H5A__dense_fnd_cb(const H5A_t *attr, hbool_t *took_ownership, void *_user_attr)
HDassert(user_attr);
HDassert(took_ownership);
/*
- * If there is an attribute already stored in "user_attr",
- * we need to free the dynamially allocated spaces for the
- * attribute, otherwise we got infinite loop closing library due to
+ * If there is an attribute already stored in "user_attr",
+ * we need to free the dynamially allocated spaces for the
+ * attribute, otherwise we got infinite loop closing library due to
* outstanding allocation. (HDFFV-10659)
*
* This callback is used by H5A__dense_remove() to close/free the
* attribute stored in "user_attr" (via H5O__msg_free_real()) after
* the attribute node is deleted from the name index v2 B-tree.
- * The issue is:
- * When deleting the attribute node from the B-tree,
- * if the attribute is found in the intermediate B-tree nodes,
+ * The issue is:
+ * When deleting the attribute node from the B-tree,
+ * if the attribute is found in the intermediate B-tree nodes,
* which may be merged/redistributed, we need to free the dynamically
* allocated spaces for the intermediate decoded attribute.
*/
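
The reflowed comment above is about ownership: if the output slot already holds a decoded attribute, it must be freed before the new one is stored, or the allocation leaks when B-tree nodes are merged or redistributed (HDFFV-10659). A generic sketch of that replace-and-free pattern with an invented attribute type:

    #include <stdlib.h>

    /* Minimal stand-in for a decoded attribute. */
    typedef struct attr { char *name; } attr_t;

    static void attr_free(attr_t *a)
    {
        if (a != NULL) {
            free(a->name);
            free(a);
        }
    }

    /* If the slot already holds an attribute decoded from an
     * intermediate B-tree node, release it before taking ownership
     * of the newly found one -- otherwise the earlier decode leaks.
     */
    static void store_found_attr(attr_t **user_attr, attr_t *found)
    {
        if (*user_attr != NULL)
            attr_free(*user_attr);
        *user_attr = found;
    }
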
diff --git a/src/H5B2.c b/src/H5B2.c
index 5355dd4..3f37d25 100644
--- a/src/H5B2.c
+++ b/src/H5B2.c
@@ -507,7 +507,7 @@ H5B2_find(H5B2_t *bt2, void *udata, H5B2_found_t op, void *op_data)
if((hdr->cls->compare)(udata, hdr->min_native_rec, &cmp) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records")
if(cmp < 0)
- HGOTO_DONE(FALSE) /* Less than the least record--not found */
+ HGOTO_DONE(FALSE) /* Less than the least record--not found */
else if(cmp == 0) { /* Record is found */
if(op && (op)(hdr->min_native_rec, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "'found' callback failed for B-tree find operation")
@@ -518,7 +518,7 @@ H5B2_find(H5B2_t *bt2, void *udata, H5B2_found_t op, void *op_data)
if((hdr->cls->compare)(udata, hdr->max_native_rec, &cmp) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records")
if(cmp > 0)
- HGOTO_DONE(FALSE) /* Less than the least record--not found */
+ HGOTO_DONE(FALSE) /* Less than the least record--not found */
else if(cmp == 0) { /* Record is found */
if(op && (op)(hdr->max_native_rec, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "'found' callback failed for B-tree find operation")
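
The two hunks above are H5B2_find's fast path: the header caches native copies of the least and greatest records, so a key that sorts below the minimum or above the maximum is rejected without reading a single tree node. A sketch of that range check, with integer keys standing in for the record-comparison callback:

    #include <stdbool.h>

    /* Refuse to descend when the key falls outside [min_rec, max_rec];
     * only in-range keys pay for a full tree search.
     */
    static bool btree_maybe_contains(int key, int min_rec, int max_rec)
    {
        if (key < min_rec)
            return false;   /* less than the least record -- not found */
        if (key > max_rec)
            return false;   /* greater than the greatest record -- not found */
        return true;        /* in range: search the tree for a real answer */
    }
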
@@ -1601,7 +1601,7 @@ done:
* Function: H5B2_patch_file
*
* Purpose: Patch the top-level file pointer contained in bt2
- * to point to idx_info->f if they are different.
+ * to point to idx_info->f if they are different.
* This is possible because the file pointer in bt2 can be
* closed out if bt2 remains open.
*
diff --git a/src/H5B2int.c b/src/H5B2int.c
index a6faab8..902253a 100644
--- a/src/H5B2int.c
+++ b/src/H5B2int.c
@@ -56,7 +56,7 @@
/********************/
/* Local Prototypes */
/********************/
-static herr_t H5B2__update_child_flush_depends(H5B2_hdr_t *hdr,
+static herr_t H5B2__update_child_flush_depends(H5B2_hdr_t *hdr,
unsigned depth, const H5B2_node_ptr_t *node_ptrs, unsigned start_idx,
unsigned end_idx, void *old_parent, void *new_parent);
@@ -1759,7 +1759,7 @@ H5B2__create_flush_depend(H5AC_info_t *parent_entry, H5AC_info_t *child_entry)
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
-
+
/* Sanity check */
HDassert(parent_entry);
HDassert(child_entry);
@@ -1796,7 +1796,7 @@ H5B2__update_flush_depend(H5B2_hdr_t *hdr, unsigned depth,
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
-
+
/* Sanity checks */
HDassert(hdr);
HDassert(depth > 0);
@@ -1886,14 +1886,14 @@ done:
*/
static herr_t
H5B2__update_child_flush_depends(H5B2_hdr_t *hdr, unsigned depth,
- const H5B2_node_ptr_t *node_ptrs, unsigned start_idx, unsigned end_idx,
+ const H5B2_node_ptr_t *node_ptrs, unsigned start_idx, unsigned end_idx,
void *old_parent, void *new_parent)
{
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
-
+
/* Sanity checks */
HDassert(hdr);
HDassert(depth > 1);
@@ -1931,7 +1931,7 @@ H5B2__destroy_flush_depend(H5AC_info_t *parent_entry, H5AC_info_t *child_entry)
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
-
+
/* Sanity check */
HDassert(parent_entry);
HDassert(child_entry);
diff --git a/src/H5B2internal.c b/src/H5B2internal.c
index 7f6b80a..54f581e 100644
--- a/src/H5B2internal.c
+++ b/src/H5B2internal.c
@@ -388,7 +388,7 @@ H5B2__insert_internal(H5B2_hdr_t *hdr, uint16_t depth, unsigned *parent_cache_in
size_t split_nrec; /* Number of records to split node at */
/* Locate node pointer for child */
- if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native,
+ if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native,
udata, &idx, &cmp) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records")
if(cmp == 0)
@@ -444,7 +444,7 @@ H5B2__insert_internal(H5B2_hdr_t *hdr, uint16_t depth, unsigned *parent_cache_in
/* Locate node pointer for child (after split/redistribute) */
/* Actually, this can be easily updated (for 2-node redistrib.) and shouldn't require re-searching */
- if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native,
+ if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native,
udata, &idx, &cmp) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records")
if(cmp == 0)
diff --git a/src/H5B2pkg.h b/src/H5B2pkg.h
index 27229f1..338db5a 100644
--- a/src/H5B2pkg.h
+++ b/src/H5B2pkg.h
@@ -433,7 +433,7 @@ H5_DLL herr_t H5B2__remove_internal(H5B2_hdr_t *hdr, hbool_t *depth_decreased,
H5_DLL herr_t H5B2__remove_leaf(H5B2_hdr_t *hdr, H5B2_node_ptr_t *curr_node_ptr,
H5B2_nodepos_t curr_pos, void *parent, void *udata, H5B2_remove_t op,
void *op_data);
-H5_DLL herr_t H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr,
+H5_DLL herr_t H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr,
hbool_t *depth_decreased, void *swap_loc, void *swap_parent, uint16_t depth,
H5AC_info_t *parent_cache_info, unsigned *parent_cache_info_flags_ptr,
H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, hsize_t n,
diff --git a/src/H5C.c b/src/H5C.c
index c5bd220..cc415ec 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -168,7 +168,7 @@ static herr_t H5C__verify_len_eoa(H5F_t *f, const H5C_class_t * type,
haddr_t addr, size_t *len, hbool_t actual);
#if H5C_DO_SLIST_SANITY_CHECKS
-static hbool_t H5C_entry_in_skip_list(H5C_t * cache_ptr,
+static hbool_t H5C_entry_in_skip_list(H5C_t * cache_ptr,
H5C_cache_entry_t *target_ptr);
#endif /* H5C_DO_SLIST_SANITY_CHECKS */
@@ -719,10 +719,10 @@ H5C_free_tag_list_cb(void *_item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED
*
* Function: H5C_prep_for_file_close
*
- * Purpose: This function should be called just prior to the cache
- * flushes at file close. There should be no protected
+ * Purpose: This function should be called just prior to the cache
+ * flushes at file close. There should be no protected
* entries in the cache at this point.
- *
+ *
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
@@ -747,8 +747,8 @@ H5C_prep_for_file_close(H5F_t *f)
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- /* For now at least, it is possible to receive the
- * close warning more than once -- the following
+ /* For now at least, it is possible to receive the
+ * close warning more than once -- the following
* if statement handles this.
*/
if(cache_ptr->close_warning_received)
@@ -764,8 +764,8 @@ H5C_prep_for_file_close(H5F_t *f)
#ifdef H5_HAVE_PARALLEL
if ( ( H5F_INTENT(f) & H5F_ACC_RDWR ) &&
- ( ! image_generated ) &&
- ( cache_ptr->aux_ptr != NULL ) &&
+ ( ! image_generated ) &&
+ ( cache_ptr->aux_ptr != NULL ) &&
( f->shared->fs_persist ) ) {
/* If persistent free space managers are enabled, flushing the
* metadata cache may result in the deletion, insertion, and/or
@@ -1008,31 +1008,31 @@ done:
* Programmer: John Mainzer
* 6/2/04
*
- * Changes: Modified function to test for slist chamges in
+ * Changes: Modified function to test for slist chamges in
* pre_serialize and serialize callbacks, and re-start
* scans through the slist when such changes occur.
*
* This has been a potential problem for some time,
- * and there has been code in this function to deal
- * with elements of this issue. However the shift
+ * and there has been code in this function to deal
+ * with elements of this issue. However the shift
* to the V3 cache in combination with the activities
- * of some of the cache clients (in particular the
+ * of some of the cache clients (in particular the
* free space manager and the fractal heap) have
* made this re-work necessary.
*
* JRM -- 12/13/14
*
- * Modified function to support rings. Basic idea is that
+ * Modified function to support rings. Basic idea is that
* every entry in the cache is assigned to a ring. Entries
- * in the outermost ring are flushed first, followed by
- * those in the next outermost ring, and so on until the
- * innermost ring is flushed. See header comment on
- * H5C_ring_t in H5Cprivate.h for a more detailed
+ * in the outermost ring are flushed first, followed by
+ * those in the next outermost ring, and so on until the
+ * innermost ring is flushed. See header comment on
+ * H5C_ring_t in H5Cprivate.h for a more detailed
* discussion.
*
* JRM -- 8/30/15
*
- * Modified function to call the free space manager
+ * Modified function to call the free space manager
* settling functions.
* JRM -- 6/9/16
*
@@ -1110,7 +1110,7 @@ H5C_flush_cache(H5F_t *f, unsigned flags)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate failed")
} /* end if */
else {
- /* flush each ring, starting from the outermost ring and
+ /* flush each ring, starting from the outermost ring and
* working inward.
*/
ring = H5C_RING_USER;
@@ -2244,7 +2244,7 @@ H5C_protect(H5F_t * f,
if(entry_ptr->prefetched) {
/* This call removes the prefetched entry from the cache,
- * and replaces it with an entry deserialized from the
+ * and replaces it with an entry deserialized from the
* image of the prefetched entry.
*/
if(H5C__deserialize_prefetched_entry(f, cache_ptr, &entry_ptr, type, addr, udata) < 0)
@@ -2313,7 +2313,7 @@ H5C_protect(H5F_t * f,
if(cache_ptr->ignore_tags != TRUE) {
haddr_t tag; /* Tag value */
- /* The entry is already in the cache, but make sure that the tag value
+ /* The entry is already in the cache, but make sure that the tag value
is still legal. This will ensure that had
the entry NOT been in the cache, tagging was still set up correctly
and it would have received a legal tag value after getting loaded
@@ -2321,7 +2321,7 @@ H5C_protect(H5F_t * f,
/* Get the tag */
tag = H5CX_get_tag();
-
+
if(H5C_verify_tag(entry_ptr->type->id, tag) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "tag verification failed")
} /* end if */
@@ -2337,9 +2337,9 @@ H5C_protect(H5F_t * f,
hit = FALSE;
- if(NULL == (thing = H5C_load_entry(f,
+ if(NULL == (thing = H5C_load_entry(f,
#ifdef H5_HAVE_PARALLEL
- coll_access,
+ coll_access,
#endif /* H5_HAVE_PARALLEL */
type, addr, udata)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't load entry")
@@ -2448,12 +2448,12 @@ H5C_protect(H5F_t * f,
* *******************************************
*
* Set the flush_last field
- * of the newly loaded entry before inserting it into the
- * index. Must do this, as the index tracked the number of
- * entries with the flush_last field set, but assumes that
+ * of the newly loaded entry before inserting it into the
+ * index. Must do this, as the index tracked the number of
+ * entries with the flush_last field set, but assumes that
* the field will not change after insertion into the index.
*
- * Note that this means that the H5C__FLUSH_LAST_FLAG flag
+ * Note that this means that the H5C__FLUSH_LAST_FLAG flag
* is ignored if the entry is already in cache.
*/
entry_ptr->flush_me_last = flush_last;
@@ -2485,7 +2485,7 @@ H5C_protect(H5F_t * f,
HDassert(entry_ptr->ro_ref_count > 0);
(entry_ptr->ro_ref_count)++;
} /* end if */
- else
+ else
HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Target already protected & not read only?!?")
} /* end if */
else {
@@ -2889,7 +2889,7 @@ done:
* 3/22/06
*
* Changes: Added extreme sanity checks on entry and exit.
- JRM -- 4/26/14
+ JRM -- 4/26/14
*
*-------------------------------------------------------------------------
*/
@@ -3111,7 +3111,7 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
if(entry_ptr->flush_dep_nparents > 0)
if(H5C__mark_flush_dep_dirty(entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
- } /* end if */
+ } /* end if */
/* Check for newly clean entry */
else if(!was_clean && !entry_ptr->is_dirty) {
/* If the entry's type has a 'notify' callback send a 'entry cleaned'
@@ -3962,8 +3962,8 @@ H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted)
HDassert( (cache_ptr->resize_ctl).min_clean_fraction <= (double)100.0f );
/* check to see if cache_ptr->resize_in_progress is TRUE. If it, this
- * is a re-entrant call via a client callback called in the resize
- * process. To avoid an infinite recursion, set reentrant_call to
+ * is a re-entrant call via a client callback called in the resize
+ * process. To avoid an infinite recursion, set reentrant_call to
* TRUE, and goto done.
*/
if(cache_ptr->resize_in_progress) {
@@ -5153,7 +5153,7 @@ H5C__flush_invalidate_cache(H5F_t *f, unsigned flags)
while(entry_ptr) {
/* Check ring */
HDassert(entry_ptr->ring == H5C_RING_SB);
-
+
/* Advance to next entry in pinned entry list */
entry_ptr = entry_ptr->next;
} /* end while */
@@ -5195,7 +5195,7 @@ done:
* until either the cache is empty, or the number of pinned
* entries stops decreasing on each pass.
*
- * If flush dependencies appear in the target ring, the
+ * If flush dependencies appear in the target ring, the
* function makes repeated passes through the cache flushing
* entries in flush dependency order.
*
@@ -5248,8 +5248,8 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
/* The flush procedure here is a bit strange.
*
* In the outer while loop we make at least one pass through the
- * cache, and then repeat until either all the pinned entries in
- * the ring unpin themselves, or until the number of pinned entries
+ * cache, and then repeat until either all the pinned entries in
+ * the ring unpin themselves, or until the number of pinned entries
* in the ring stops declining. In this later case, we scream and die.
*
* Since the fractal heap can dirty, resize, and/or move entries
@@ -5305,9 +5305,9 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
/* There is also the possibility that entries will be
* dirtied, resized, moved, and/or removed from the cache
- * as the result of calls to the flush callbacks. We use
- * the slist_len_increase and slist_size_increase increase
- * fields in struct H5C_t to track these changes for purpose
+ * as the result of calls to the flush callbacks. We use
+ * the slist_len_increase and slist_size_increase increase
+ * fields in struct H5C_t to track these changes for purpose
* of sanity checking.
*
* To this end, we must zero these fields before we start
@@ -5322,8 +5322,8 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
* This flag is set to TRUE by H5C__flush_single_entry if the slist
* is modified by a pre_serialize, serialize, or notify callback.
*
- * H5C_flush_invalidate_ring() uses this flag to detect any
- * modifications to the slist that might corrupt the scan of
+ * H5C_flush_invalidate_ring() uses this flag to detect any
+ * modifications to the slist that might corrupt the scan of
* the slist -- and restart the scan in this event.
*/
cache_ptr->slist_changed = FALSE;
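
The slist_changed protocol above is a scan-with-restart pattern: clear the flag, walk the list, and if any callback mutates the list mid-walk, abandon the (now possibly stale) cursor and start over from the head. A self-contained sketch of the pattern; the types and the flush stub are invented:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct node { struct node *next; bool dirty; } node_t;

    static bool list_changed;   /* set by anything that mutates the list */

    /* Stub flush: a real callback might also insert or remove nodes,
     * in which case it must set list_changed, mirroring the
     * slist_changed rule described above.
     */
    static void flush_one(node_t *n)
    {
        n->dirty = false;
    }

    static void flush_all(node_t *head)
    {
        node_t *cur = head;
        while (cur != NULL) {
            node_t *next = cur->next;   /* saved before the callback runs */
            list_changed = false;
            if (cur->dirty)
                flush_one(cur);
            /* restart from the head if the scan may have been corrupted */
            cur = list_changed ? head : next;
        }
    }
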
@@ -5353,14 +5353,14 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
entry_ptr = next_entry_ptr;
- /* It is possible that entries will be dirtied, resized,
+ /* It is possible that entries will be dirtied, resized,
* flushed, or removed from the cache via the take ownership
- * flag as the result of pre_serialize or serialized callbacks.
- *
+ * flag as the result of pre_serialize or serialized callbacks.
+ *
* This in turn can corrupt the scan through the slist.
*
- * We test for slist modifications in the pre_serialize
- * and serialize callbacks, and restart the scan of the
+ * We test for slist modifications in the pre_serialize
+ * and serialize callbacks, and restart the scan of the
* slist if we find them. However, best we do some extra
* sanity checking just in case.
*/
@@ -5493,8 +5493,8 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
if((!entry_ptr->flush_me_last || (entry_ptr->flush_me_last && cache_ptr->num_last_entries >= cache_ptr->slist_len))
&& entry_ptr->flush_dep_nchildren == 0 && entry_ptr->ring == ring) {
if(entry_ptr->is_protected) {
- /* we have major problems -- but lets flush and
- * destroy everything we can before we flag an
+ /* we have major problems -- but lets flush and
+ * destroy everything we can before we flag an
* error.
*/
protected_entries++;
@@ -5502,9 +5502,9 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
HDassert(!(entry_ptr->is_dirty));
} /* end if */
else if(!(entry_ptr->is_pinned)) {
- /* if *entry_ptr is dirty, it is possible
- * that one or more other entries may be
- * either removed from the cache, loaded
+ /* if *entry_ptr is dirty, it is possible
+ * that one or more other entries may be
+ * either removed from the cache, loaded
* into the cache, or moved to a new location
* in the file as a side effect of the flush.
*
@@ -5513,14 +5513,14 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
* entry, allowing it to be removed also and
* invalidating the next_entry_ptr.
*
- * If either of these happen, and one of the target
- * or proxy entries happens to be the next entry in
+ * If either of these happen, and one of the target
+ * or proxy entries happens to be the next entry in
* the hash bucket, we could either find ourselves
* either scanning a non-existant entry, scanning
* through a different bucket, or skipping an entry.
*
- * Neither of these are good, so restart the
- * the scan at the head of the hash bucket
+ * Neither of these are good, so restart the
+ * the scan at the head of the hash bucket
* after the flush if we detect that the next_entry_ptr
* becomes invalid.
*
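The watched-pointer mechanism this comment alludes to can be sketched as follows. This is illustrative only (the real code uses entry_watched_for_removal in H5C_t; the structs and helpers here are invented): the scan records the entry it plans to visit next, the removal path clears the watch if that entry goes away, and the scan restarts from the re-read bucket head when the watch was cleared.

    #include <stddef.h>

    struct entry { struct entry *ht_next; };

    static struct entry *watched_for_removal;    /* cleared by the removal path */

    /* Every path that unlinks an entry from a bucket must call this */
    static void on_entry_removed(struct entry *e)
    {
        if (e == watched_for_removal)
            watched_for_removal = NULL;          /* saved 'next' is no longer safe */
    }

    /* Scan one bucket; restart from the (re-read) head if the entry we
     * planned to visit next was removed as a flush side effect.
     */
    static void scan_bucket(struct entry **bucket, void (*flush)(struct entry *))
    {
        struct entry *e = *bucket;

        while (e != NULL) {
            struct entry *next = e->ht_next;

            watched_for_removal = next;          /* watch our planned next entry */
            flush(e);                            /* may remove arbitrary entries */
            if (next != NULL && watched_for_removal == NULL)
                e = *bucket;                     /* 'next' vanished: restart scan */
            else
                e = next;
            watched_for_removal = NULL;
        }
    }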
@@ -5534,11 +5534,11 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed")
/* Restart the index list scan if necessary. Must
- * do this if the next entry is evicted, and also if
+ * do this if the next entry is evicted, and also if
* one or more entries are inserted, loaded, or moved
* as these operations can result in part of the scan
* being skipped -- which can cause a spurious failure
+ * if this results in the size of the pinned entry list
+ * if this results in the size of the pinned entry
* failing to decline during the pass.
*/
if((NULL != next_entry_ptr && NULL == cache_ptr->entry_watched_for_removal)
@@ -5581,7 +5581,7 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
entry_ptr = entry_ptr->next;
} /* end while */
- /* Check if the number of pinned entries in the ring is positive, and
+ /* Check if the number of pinned entries in the ring is positive, and
* it is not declining. Scream and die if so.
*/
if(cur_ring_pel_len > 0 && cur_ring_pel_len >= old_ring_pel_len) {
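A minimal sketch of this progress check, with invented helpers standing in for the real pinned-entry-list bookkeeping: each pass must strictly shrink the pinned count, otherwise the routine fails loudly rather than looping forever.

    #include <stdio.h>

    /* Repeated eviction passes must strictly shrink the pinned entry list;
     * if a pass makes no progress, report an error instead of spinning.
     */
    static int drain_pinned_entries(int (*count_pinned)(void), void (*evict_unpinned)(void))
    {
        int old_len = count_pinned();

        while (old_len > 0) {
            int cur_len;

            evict_unpinned();                /* may allow pinned entries to unpin */
            cur_len = count_pinned();
            if (cur_len >= old_len) {        /* positive and not declining */
                fprintf(stderr, "pinned entry count stuck at %d\n", cur_len);
                return -1;                   /* scream and die */
            }
            old_len = cur_len;
        }
        return 0;
    }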
@@ -5624,16 +5624,16 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C__flush_ring
*
- * Purpose: Flush the entries contained in the specified cache and
+ * Purpose: Flush the entries contained in the specified cache and
* ring. All entries in rings outside the specified ring
* must have been flushed on entry.
*
* If the cache contains protected entries in the specified
- * ring, the function will fail, as protected entries cannot
+ * ring, the function will fail, as protected entries cannot
* be flushed. However all unprotected entries in the target
* ring should be flushed before the function returns failure.
*
- * If flush dependencies appear in the target ring, the
+ * If flush dependencies appear in the target ring, the
* function makes repeated passes through the slist flushing
* entries in flush dependency order.
*
@@ -5699,10 +5699,10 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
/* Set the cache_ptr->slist_changed to false.
*
- * This flag is set to TRUE by H5C__flush_single_entry if the
+ * This flag is set to TRUE by H5C__flush_single_entry if the
* slist is modified by a pre_serialize, serialize, or notify callback.
* H5C_flush_cache uses this flag to detect any modifications
- * to the slist that might corrupt the scan of the slist -- and
+ * to the slist that might corrupt the scan of the slist -- and
* restart the scan in this event.
*/
cache_ptr->slist_changed = FALSE;
@@ -5722,8 +5722,8 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
*
* To make things more entertaining, with the advent of the
* fractal heap, the entry serialize callback can cause entries
- * to be dirtied, resized, and/or moved. Also, the
- * pre_serialize callback can result in an entry being
+ * to be dirtied, resized, and/or moved. Also, the
+ * pre_serialize callback can result in an entry being
* removed from the cache via the take ownership flag.
*
* To deal with this, we first make note of the initial
@@ -5774,24 +5774,24 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
HDassert(next_entry_ptr->is_dirty);
HDassert(next_entry_ptr->in_slist);
} /* end if */
-
+
entry_ptr = next_entry_ptr;
/* With the advent of the fractal heap, the free space
* manager, and the version 3 cache, it is possible
- * that the pre-serialize or serialize callback will
- * dirty, resize, or take ownership of other entries
- * in the cache.
+ * that the pre-serialize or serialize callback will
+ * dirty, resize, or take ownership of other entries
+ * in the cache.
*
* To deal with this, I have inserted code to detect any
* change in the skip list not directly under the control
* of this function. If such modifications are detected,
- * we must re-start the scan of the skip list to avoid
+ * we must re-start the scan of the skip list to avoid
* the possibility that the target of the next_entry_ptr
* may have been flushed or deleted from the cache.
*
* To verify that all such possibilities have been dealt
- * with, we do a bit of extra sanity checking on
+ * with, we do a bit of extra sanity checking on
* entry_ptr.
*/
HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
@@ -5821,20 +5821,20 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
else
next_entry_ptr = NULL;
- if((!flush_marked_entries || entry_ptr->flush_marker)
+ if((!flush_marked_entries || entry_ptr->flush_marker)
&& (!entry_ptr->flush_me_last ||
- (entry_ptr->flush_me_last
+ (entry_ptr->flush_me_last
&& (cache_ptr->num_last_entries >= cache_ptr->slist_len
- || (flush_marked_entries && entry_ptr->flush_marker))))
+ || (flush_marked_entries && entry_ptr->flush_marker))))
&& (entry_ptr->flush_dep_nchildren == 0
- || entry_ptr->flush_dep_ndirty_children == 0)
+ || entry_ptr->flush_dep_ndirty_children == 0)
&& entry_ptr->ring == ring) {
HDassert(entry_ptr->flush_dep_nunser_children == 0);
if(entry_ptr->is_protected) {
- /* we probably have major problems -- but lets
- * flush everything we can before we decide
+ /* we probably have major problems -- but let's
+ * flush everything we can before we decide
* whether to flag an error.
*/
tried_to_flush_protected_entry = TRUE;
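The multi-clause eligibility test in the hunk above is easier to follow when factored into a predicate. The sketch below mirrors its clauses one for one; the struct layouts are invented for illustration and do not match the real H5C_t or H5C_cache_entry_t definitions.

    #include <stdbool.h>
    #include <stdint.h>

    struct flush_ctx {
        bool     flush_marked_entries;
        uint32_t num_last_entries;
        uint32_t slist_len;
    };

    struct cand {
        bool     flush_marker;
        bool     flush_me_last;
        uint32_t flush_dep_nchildren;
        uint32_t flush_dep_ndirty_children;
        int      ring;
    };

    /* Mirrors the eligibility test above, clause by clause */
    static bool entry_flushable(const struct flush_ctx *c, const struct cand *e, int ring)
    {
        if (c->flush_marked_entries && !e->flush_marker)
            return false;                    /* only marked entries are wanted */
        if (e->flush_me_last &&
                !(c->num_last_entries >= c->slist_len ||
                  (c->flush_marked_entries && e->flush_marker)))
            return false;                    /* not yet this entry's turn */
        if (e->flush_dep_nchildren != 0 && e->flush_dep_ndirty_children != 0)
            return false;                    /* dirty children must flush first */
        return e->ring == ring;              /* stay within the target ring */
    }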
@@ -5846,9 +5846,9 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
if(cache_ptr->slist_changed) {
/* The slist has been modified by something
- * other than the simple removal of the
+ * other than the simple removal of the
* flushed entry after the flush.
- *
+ *
* This has the potential to corrupt the
* scan through the slist, so restart it.
*/
@@ -5976,11 +5976,11 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
else
write_entry = FALSE;
- /* if we have received close warning, and we have been instructed to
+ /* if we have received a close warning, and we have been instructed to
* generate a metadata cache image, and we have actually constructed
* the entry images, set suppress_image_entry_frees to TRUE.
*
- * Set suppress_image_entry_writes to TRUE if indicated by the
+ * Set suppress_image_entry_writes to TRUE if indicated by the
* image_ctl flags.
*/
if(cache_ptr->close_warning_received && cache_ptr->image_ctl.generate_image
@@ -6059,11 +6059,11 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
} /* end if ( ! (entry_ptr->image_up_to_date) ) */
} /* end if */
- /* Finally, write the image to disk.
- *
- * Note that if the H5AC__CLASS_SKIP_WRITES flag is set in the
+ /* Finally, write the image to disk.
+ *
+ * Note that if the H5AC__CLASS_SKIP_WRITES flag is set in the
* entry's type, we silently skip the write. This
- * flag should only be used in test code.
+ * flag should only be used in test code.
*/
if(write_entry) {
HDassert(entry_ptr->is_dirty);
@@ -6075,8 +6075,8 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
/* Write the image to disk unless the write is suppressed.
*
- * This happens if both suppress_image_entry_writes and
- * entry_ptr->include_in_image are TRUE, or if the
+ * This happens if both suppress_image_entry_writes and
+ * entry_ptr->include_in_image are TRUE, or if the
* H5AC__CLASS_SKIP_WRITES flag is set in the entry's type. This
* flag should only be used in test code.
*/
@@ -6109,7 +6109,7 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
#endif /* H5_HAVE_PARALLEL */
} /* end if */
- /* if the entry has a notify callback, notify it that we have
+ /* if the entry has a notify callback, notify it that we have
* just flushed the entry.
*/
if(entry_ptr->type->notify &&
@@ -6121,7 +6121,7 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
* made if it was appropriate to make them. Similarly, the entry
* has been written to disk if desired.
*
- * Thus it is now safe to update the cache data structures for the
+ * Thus it is now safe to update the cache data structures for the
* flush.
*/
@@ -6175,7 +6175,7 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
*
* 5) Remove it from the tag list for this object
*
- * Finally, if the destroy_entry flag is set, discard the
+ * Finally, if the destroy_entry flag is set, discard the
* entry.
*/
H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL)
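The ordered teardown this comment enumerates can be summarized in a short sketch. The helper names below are invented placeholders for the real macros and calls (H5C__DELETE_FROM_INDEX and friends), shown only to make the ordering explicit.

    struct cache;
    struct entry;

    /* Placeholders for the real removal operations */
    extern void delete_from_index(struct cache *c, struct entry *e);
    extern void remove_from_slist(struct cache *c, struct entry *e);
    extern void update_rp_for_eviction(struct cache *c, struct entry *e);
    extern void remove_from_tag_list(struct cache *c, struct entry *e);
    extern void discard(struct entry *e);

    /* Unhook the entry from every internal data structure, in order,
     * before (optionally) discarding it.
     */
    static void evict_entry(struct cache *c, struct entry *e, int destroy_entry)
    {
        delete_from_index(c, e);             /* 1) hash index */
        remove_from_slist(c, e);             /* 2) dirty-entry skip list */
        update_rp_for_eviction(c, e);        /* 3) replacement policy (LRU etc.) */
        remove_from_tag_list(c, e);          /* 4) per-object tag list */
        if (destroy_entry)
            discard(e);                      /* 5) finally, discard the entry */
    }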
@@ -6209,7 +6209,7 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
/* We are either doing a flush or a clear.
*
* A clear and a flush are the same from the point of
- * view of the replacement policy and the slist.
+ * view of the replacement policy and the slist.
* Hence no differentiation between them.
*
* JRM -- 7/7/07
@@ -6219,8 +6219,8 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush)
- /* mark the entry as clean and update the index for
- * entry clean. Also, call the clear callback
+ /* mark the entry as clean and update the index to
+ * reflect this. Also, call the clear callback
* if defined.
*/
entry_ptr->is_dirty = FALSE;
@@ -6252,8 +6252,8 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
end before the entry_ptr gets freed */
entry_addr = entry_ptr->addr;
- /* Internal cache data structures should now be up to date, and
- * consistent with the status of the entry.
+ /* Internal cache data structures should now be up to date, and
+ * consistent with the status of the entry.
*
* Now discard the entry if appropriate.
*/
@@ -6262,18 +6262,18 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
HDassert(0 == entry_ptr->flush_dep_nparents);
/* if both suppress_image_entry_frees and entry_ptr->include_in_image
- * are true, simply set entry_ptr->image_ptr to NULL, as we have
+ * are true, simply set entry_ptr->image_ptr to NULL, as we have
* another pointer to the buffer in an instance of H5C_image_entry_t
* in cache_ptr->image_entries.
*
* Otherwise, free the buffer if it exists.
*/
- if(suppress_image_entry_frees && entry_ptr->include_in_image)
+ if(suppress_image_entry_frees && entry_ptr->include_in_image)
entry_ptr->image_ptr = NULL;
else if(entry_ptr->image_ptr != NULL)
entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
- /* If the entry is not a prefetched entry, verify that the flush
+ /* If the entry is not a prefetched entry, verify that the flush
* dependency parents addresses array has been transferred.
*
* If the entry is prefetched, the free_icr routine will dispose of
@@ -6284,8 +6284,8 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
HDassert(NULL == entry_ptr->fd_parent_addrs);
} /* end if */
- /* Check whether we should free the space in the file that
- * the entry occupies
+ /* Check whether we should free the space in the file that
+ * the entry occupies
*/
if(free_file_space) {
hsize_t fsf_size;
@@ -6322,14 +6322,14 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
/* Reset the pointer to the cache the entry is within. -QAK */
entry_ptr->cache_ptr = NULL;
- /* increment entries_removed_counter and set
- * last_entry_removed_ptr. As we are likely abuut to
- * free the entry, recall that last_entry_removed_ptr
+ /* increment entries_removed_counter and set
+ * last_entry_removed_ptr. As we are likely about to
+ * free the entry, recall that last_entry_removed_ptr
* must NEVER be dereferenced.
*
* Recall that these fields are maintained to allow functions
- * that perform scans of lists of entries to detect the
- * unexpected removal of entries (via expunge, eviction,
+ * that perform scans of lists of entries to detect the
+ * unexpected removal of entries (via expunge, eviction,
* or take ownership at present), so that they can re-start
* their scans if necessary.
*
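A sketch of how a scanner can use such a counter/pointer pair to decide whether its saved next pointer is still safe. The names are invented; note the recorded pointer is only ever compared, never dereferenced.

    #include <stdint.h>

    struct rm_cache {
        int64_t     entries_removed_counter;
        const void *last_entry_removed_ptr;  /* compared only, never dereferenced */
    };

    /* Every expunge/evict/take-ownership path records what it removed */
    static void note_removal(struct rm_cache *c, const void *entry)
    {
        c->entries_removed_counter++;
        c->last_entry_removed_ptr = entry;   /* entry may be freed right after */
    }

    /* A scanner decides whether its saved 'next' pointer is still trustworthy */
    static int must_restart_scan(const struct rm_cache *c, int64_t counter_before,
                                 const void *saved_next)
    {
        if (c->entries_removed_counter == counter_before)
            return 0;                        /* nothing was removed */
        if (c->entries_removed_counter == counter_before + 1 &&
                c->last_entry_removed_ptr != saved_next)
            return 0;                        /* one removal, and not our entry */
        return 1;                            /* anything else: restart the scan */
    }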
@@ -6373,15 +6373,15 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
HDassert(take_ownership);
/* client is taking ownership of the entry.
- * set bad magic here too so the cache will choke
+ * set bad magic here too so the cache will choke
* unless the entry is re-inserted properly
*/
entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
} /* end else */
} /* if (destroy) */
- /* Check if we have to update the page buffer with cleared entries
- * so it doesn't go out of date
+ /* Check if we have to update the page buffer with cleared entries
+ * so it doesn't go out of date
*/
if(update_page_buffer) {
/* Sanity check */
@@ -6398,9 +6398,9 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "log_flush callback failed")
done:
- HDassert( ( ret_value != SUCCEED ) || ( destroy_entry ) ||
+ HDassert( ( ret_value != SUCCEED ) || ( destroy_entry ) ||
( ! entry_ptr->flush_in_progress ) );
- HDassert( ( ret_value != SUCCEED ) || ( destroy_entry ) ||
+ HDassert( ( ret_value != SUCCEED ) || ( destroy_entry ) ||
( take_ownership ) || ( ! entry_ptr->is_dirty ) );
FUNC_LEAVE_NOAPI(ret_value)
@@ -6417,7 +6417,7 @@ done:
* If it does, adjust 'len' accordingly.
*
* Verify that 'len' does not exceed the eoa when 'actual' is
- * true i.e. 'len' is the actual length from get_load_size
+ * true, i.e. 'len' is the actual length from the get_load_size
* callback with non-null image pointer.
* If it does, return an error.
*
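A sketch of the clamp-or-fail logic this comment describes, under invented type names (the real code works in terms of haddr_t/hsize_t and the file's end of allocation):

    #include <stdint.h>

    typedef uint64_t addr_sk_t;              /* illustrative stand-ins for the */
    typedef uint64_t size_sk_t;              /* real haddr_t / hsize_t types   */

    /* Clamp a read length against the end of allocation (EOA). A speculative
     * length is trimmed; an exact ('actual') length that still runs past the
     * EOA can only mean corruption, so report an error instead.
     */
    static int verify_len_eoa_sk(addr_sk_t addr, addr_sk_t eoa, size_sk_t *len, int actual)
    {
        if (addr + *len <= eoa)
            return 0;                        /* fits: nothing to do */
        if (actual)
            return -1;                       /* exact length beyond EOA: error */
        *len = (size_sk_t)(eoa - addr);      /* speculative length: trim to EOA */
        return 0;
    }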
@@ -6438,7 +6438,7 @@ H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr,
FUNC_ENTER_STATIC
- /* if type == H5FD_MEM_GHEAP, H5F_block_read() forces
+ /* if type == H5FD_MEM_GHEAP, H5F_block_read() forces
* type to H5FD_MEM_DRAW via its call to H5F__accum_read().
* Thus we do the same for purposes of computing the EOA
* for sanity checks.
@@ -6570,7 +6570,7 @@ H5C_load_entry(H5F_t * f,
/* Get the # of read attempts */
max_tries = tries = H5F_GET_READ_ATTEMPTS(f);
- /*
+ /*
* This do/while loop performs the following until the metadata checksum
* is correct or the file's number of allowed read attempts is reached.
* --read the metadata
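The retry loop sketched below captures the shape of this do/while; the I/O and checksum callbacks are invented stand-ins for the real read and verification routines.

    #include <stddef.h>
    #include <stdint.h>

    /* Re-read a metadata block until its checksum verifies or the allowed
     * number of read attempts is exhausted.
     */
    static int read_with_retries(uint64_t addr, size_t len, uint8_t *image,
                                 unsigned max_tries,
                                 int (*read_block)(uint64_t, size_t, uint8_t *),
                                 int (*checksum_ok)(const uint8_t *, size_t))
    {
        unsigned tries = max_tries;

        do {
            if (read_block(addr, len, image) < 0)
                return -1;                           /* hard I/O error: give up */
            if (checksum_ok(image, len))
                return (int)(max_tries - tries);     /* retries actually used */
        } while (--tries > 0);

        return -1;                                   /* attempts exhausted */
    }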
@@ -6870,8 +6870,8 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted)
HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size));
/* check to see if cache_ptr->msic_in_progress is TRUE. If it is, this
- * is a re-entrant call via a client callback called in the make
- * space in cache process. To avoid an infinite recursion, set
+ * is a re-entrant call via a client callback called in the make
+ * space in cache process. To avoid an infinite recursion, set
* reentrant_call to TRUE, and goto done.
*/
if(cache_ptr->msic_in_progress) {
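The guard is the classic in-progress flag; a minimal sketch with invented names:

    #include <stdbool.h>

    struct msic_cache { bool msic_in_progress; };

    /* A client callback invoked while making space may itself try to make
     * space; the flag detects that and returns at once instead of recursing.
     */
    static int make_space(struct msic_cache *c)
    {
        if (c->msic_in_progress)
            return 0;                        /* re-entrant call: nothing to do */
        c->msic_in_progress = true;

        /* ... evict entries; callbacks here may call make_space() again ... */

        c->msic_in_progress = false;
        return 0;
    }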
@@ -6943,9 +6943,9 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted)
}
#endif /* H5C_COLLECT_CACHE_STATS */
- /* reset entries_removed_counter and
- * last_entry_removed_ptr prior to the call to
- * H5C__flush_single_entry() so that we can spot
+ /* reset entries_removed_counter and
+ * last_entry_removed_ptr prior to the call to
+ * H5C__flush_single_entry() so that we can spot
* unexpected removals of entries from the cache,
* and set the restart_scan flag if proceeding
* would be likely to cause us to scan an entry
@@ -6962,7 +6962,7 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted)
restart_scan = TRUE;
- } else if ( (cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size
+ } else if ( (cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size
#ifdef H5_HAVE_PARALLEL
&& !(entry_ptr->coll_access)
#endif /* H5_HAVE_PARALLEL */
@@ -7120,7 +7120,7 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted)
prev_ptr = entry_ptr->aux_prev;
- if ( ( !(entry_ptr->prefetched_dirty) )
+ if ( ( !(entry_ptr->prefetched_dirty) )
#ifdef H5_HAVE_PARALLEL
&& ( ! (entry_ptr->coll_access) )
#endif /* H5_HAVE_PARALLEL */
@@ -7170,7 +7170,7 @@ done:
*
* Changes:
*
- * Added code to verify that the LRU contains no pinned
+ * Added code to verify that the LRU contains no pinned
* entries. JRM -- 4/25/14
*
*-------------------------------------------------------------------------
@@ -7250,7 +7250,7 @@ H5C_validate_lru_list(H5C_t * cache_ptr)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
}
- if ( ( entry_ptr->is_pinned ) ||
+ if ( ( entry_ptr->is_pinned ) ||
( entry_ptr->pinned_from_client ) ||
( entry_ptr->pinned_from_cache ) ) {
@@ -7286,7 +7286,7 @@ done:
*
* Function: H5C_validate_pinned_entry_list
*
- * Purpose: Debugging function that scans the pinned entry list for
+ * Purpose: Debugging function that scans the pinned entry list for
* errors.
*
* If an error is detected, the function generates a
@@ -7418,7 +7418,7 @@ done:
*
* Function: H5C_validate_protected_entry_list
*
- * Purpose: Debugging function that scans the protected entry list for
+ * Purpose: Debugging function that scans the protected entry list for
* errors.
*
* If an error is detected, the function generates a
@@ -7543,11 +7543,11 @@ done:
*
* Function: H5C_entry_in_skip_list
*
- * Purpose: Debugging function that scans skip list to see if it
- * is in present. We need this, as it is possible for
+ * Purpose: Debugging function that scans the skip list to see if the
+ * target entry is present. We need this, as it is possible for
* an entry to be in the skip list twice.
*
- * Return: FALSE if the entry is not in the skip list, and TRUE
+ * Return: FALSE if the entry is not in the skip list, and TRUE
* if it is.
*
* Programmer: John Mainzer, 11/1/14
@@ -7613,7 +7613,7 @@ H5C_entry_in_skip_list(H5C_t * cache_ptr, H5C_cache_entry_t *target_ptr)
*/
herr_t
H5C__flush_marked_entries(H5F_t * f)
-{
+{
herr_t ret_value = SUCCEED;
FUNC_ENTER_PACKAGE
@@ -7635,7 +7635,7 @@ done:
* Function: H5C_cork
*
* Purpose: To cork/uncork/get cork status of an object depending on "action":
- * H5C__SET_CORK:
+ * H5C__SET_CORK:
* To cork the object
* Return error if the object is already corked
* H5C__UNCORK:
@@ -7644,7 +7644,7 @@ done:
* H5C__GET_CORKED:
* To retrieve the cork status of an object in
* the parameter "corked"
- *
+ *
* Return: Success: Non-negative
* Failure: Negative
*
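The three documented actions reduce to a small dispatch. A sketch with invented callbacks in place of the real tag-info lookups:

    #include <stdbool.h>

    enum cork_action { SET_CORK, UNCORK, GET_CORKED };

    /* Dispatch on the requested action, mirroring the three cases above */
    static int cork_dispatch(void *tag, enum cork_action action, bool *corked_out,
                             bool (*is_corked)(void *), void (*set_corked)(void *, bool))
    {
        switch (action) {
            case SET_CORK:
                if (is_corked(tag))
                    return -1;               /* error: object already corked */
                set_corked(tag, true);
                return 0;
            case UNCORK:
                if (!is_corked(tag))
                    return -1;               /* error: object is not corked */
                set_corked(tag, false);
                return 0;
            case GET_CORKED:
                *corked_out = is_corked(tag);
                return 0;
            default:
                return -1;
        }
    }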
@@ -7654,7 +7654,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5C_cork(H5C_t *cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked)
+H5C_cork(H5C_t *cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked)
{
H5C_tag_info_t *tag_info; /* Points to a tag info struct */
herr_t ret_value = SUCCEED;
@@ -7836,8 +7836,8 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C__mark_flush_dep_serialized()
*
- * Purpose: Decrement the flush_dep_nunser_children fields of all the
- * target entry's flush dependency parents in response to
+ * Purpose: Decrement the flush_dep_nunser_children fields of all the
+ * target entry's flush dependency parents in response to
* the target entry becoming serialized.
*
* Return: Non-negative on success/Negative on failure
@@ -7886,7 +7886,7 @@ done:
* Function: H5C__mark_flush_dep_unserialized()
*
* Purpose: Increment the flush_dep_nunser_children fields of all the
- * target entry's flush dependency parents in response to
+ * target entry's flush dependency parents in response to
* the target entry becoming unserialized.
*
* Return: Non-negative on success/Negative on failure
@@ -7912,7 +7912,7 @@ H5C__mark_flush_dep_unserialized(H5C_cache_entry_t * entry_ptr)
/* Sanity check */
HDassert(entry_ptr->flush_dep_parent);
HDassert(entry_ptr->flush_dep_parent[u]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children <
+ HDassert(entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children <
entry_ptr->flush_dep_parent[u]->flush_dep_nchildren);
/* increment parent's number of unserialized children */
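The serialized/unserialized bookkeeping is symmetric: one transition increments every flush dependency parent's counter, the other decrements it. A sketch under invented struct names:

    #include <stddef.h>

    struct dep_entry {
        struct dep_entry **flush_dep_parent;     /* array of parent pointers */
        size_t             flush_dep_nparents;
        size_t             flush_dep_nunser_children;
    };

    /* Becoming unserialized gives every parent one more unserialized child;
     * becoming serialized takes it back.
     */
    static void mark_unserialized(struct dep_entry *e)
    {
        for (size_t u = 0; u < e->flush_dep_nparents; u++)
            e->flush_dep_parent[u]->flush_dep_nunser_children++;
    }

    static void mark_serialized(struct dep_entry *e)
    {
        for (size_t u = 0; u < e->flush_dep_nparents; u++)
            e->flush_dep_parent[u]->flush_dep_nunser_children--;
    }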
@@ -7972,11 +7972,11 @@ H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t * entry,
/*-------------------------------------------------------------------------
* Function: H5C__serialize_cache
*
- * Purpose: Serialize (i.e. construct an on disk image) for all entries
- * in the metadata cache including clean entries.
+ * Purpose: Serialize (i.e. construct an on disk image of) all entries
+ * in the metadata cache, including clean entries.
*
* Note that flush dependencies and "flush me last" flags
- * must be observed in the serialization process.
+ * must be observed in the serialization process.
*
* Note also that entries may be loaded, flushed, evicted,
* expunged, relocated, resized, or removed from the cache
@@ -7984,17 +7984,17 @@ H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t * entry,
* a regular flush.
*
* However, we are given that the cache will contain no protected
- * entries on entry to this routine (although entries may be
- * briefly protected and then unprotected during the serialize
- * process).
+ * entries on entry to this routine (although entries may be
+ * briefly protected and then unprotected during the serialize
+ * process).
*
- * The objective of this routine is serialize all entries and
- * to force all entries into their actual locations on disk.
+ * The objective of this routine is to serialize all entries and
+ * to force them into their actual locations on disk.
*
- * The initial need for this routine is to settle all entries
- * in the cache prior to construction of the metadata cache
+ * The initial need for this routine is to settle all entries
+ * in the cache prior to construction of the metadata cache
* image so that the size of the cache image can be calculated.
- * However, I gather that other uses for the routine are
+ * However, I gather that other uses for the routine are
* under consideration.
*
* Return: Non-negative on success/Negative on failure or if there was
@@ -8065,10 +8065,10 @@ H5C__serialize_cache(H5F_t *f)
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
#ifndef NDEBUG
- /* if this is a debug build, set the serialization_count field of
+ /* if this is a debug build, set the serialization_count field of
* each entry in the cache to zero before we start the serialization.
* This allows us to detect the case in which any entry is serialized
- * more than once (a performance issues), and more importantly, the
+ * more than once (a performance issue), and more importantly, the
* case in which any flush dependency parent is serialized more than
* once (a correctness issue).
*/
@@ -8084,10 +8084,10 @@ H5C__serialize_cache(H5F_t *f)
} /* end block */
#endif /* NDEBUG */
- /* set cache_ptr->serialization_in_progress to TRUE, and back
+ /* set cache_ptr->serialization_in_progress to TRUE, and back
* to FALSE at the end of the function. Must maintain this flag
- * to support H5C_get_serialization_in_progress(), which is in
- * turn required to support sanity checking in some cache
+ * to support H5C_get_serialization_in_progress(), which is in
+ * turn required to support sanity checking in some cache
* clients.
*/
HDassert(!cache_ptr->serialization_in_progress);
@@ -8165,16 +8165,16 @@ done:
*
* If the cache contains protected entries in the specified
* ring, the function will fail, as protected entries cannot
- * be serialized. However all unprotected entries in the
- * target ring should be serialized before the function
+ * be serialized. However all unprotected entries in the
+ * target ring should be serialized before the function
* returns failure.
*
* If flush dependencies appear in the target ring, the
* function makes repeated passes through the index list
* serializing entries in flush dependency order.
*
- * All entries outside the H5C_RING_SBE are marked for
- * inclusion in the cache image. Entries in H5C_RING_SBE
+ * All entries outside the H5C_RING_SBE are marked for
+ * inclusion in the cache image. Entries in H5C_RING_SBE
* and below are marked for exclusion from the image.
*
* Return: Non-negative on success/Negative on failure or if there was
@@ -8209,76 +8209,76 @@ H5C__serialize_ring(H5F_t *f, H5C_ring_t ring)
/* The objective here is to serialize all entries in the cache ring
* in flush dependency order.
*
- * The basic algorithm is to scan the cache index list looking for
+ * The basic algorithm is to scan the cache index list looking for
* unserialized entries that are either not in a flush dependency
* relationship, or which have no unserialized children. Any such
- * entry is serialized and its flush dependency parents (if any) are
+ * entry is serialized and its flush dependency parents (if any) are
* informed -- allowing them to decrement their unserialized child counts.
*
* However, this algorithm is complicated by the ability
- * of client serialization callbacks to perform operations on
- * on the cache which can result in the insertion, deletion,
+ * of client serialization callbacks to perform operations
+ * on the cache which can result in the insertion, deletion,
* relocation, resize, dirty, flush, eviction, or removal (via the
* take ownership flag) of entries. Changes in the flush dependency
* structure are also possible.
*
- * On the other hand, the algorithm is simplified by the fact that
- * we are serializing, not flushing. Thus, as long as all entries
+ * On the other hand, the algorithm is simplified by the fact that
+ * we are serializing, not flushing. Thus, as long as all entries
* are serialized correctly, it doesn't matter if we have to go back
* and serialize an entry a second time.
*
- * These possible actions result in the following modfications to
+ * These possible actions result in the following modifications to
* the basic algorithm:
*
- * 1) In the event of an entry expunge, eviction or removal, we must
- * restart the scan as it is possible that the next entry in our
+ * 1) In the event of an entry expunge, eviction or removal, we must
+ * restart the scan as it is possible that the next entry in our
* scan is no longer in the cache. Were we to examine this entry,
* we would be accessing deallocated memory.
*
- * 2) A resize, dirty, or insertion of an entry may result in the
- * the increment of a flush dependency parent's dirty and/or
- * unserialized child count. In the context of serializing the
- * the cache, this is a non-issue, as even if we have already
- * serialized the parent, it will be marked dirty and its image
- * marked out of date if appropriate when the child is serialized.
- *
+ * 2) A resize, dirty, or insertion of an entry may result in
+ * the increment of a flush dependency parent's dirty and/or
+ * unserialized child count. In the context of serializing
+ * the cache, this is a non-issue, as even if we have already
+ * serialized the parent, it will be marked dirty and its image
+ * marked out of date if appropriate when the child is serialized.
+ *
* However, this is a major issue for a flush, as were this to happen
* in a flush, it would violate the invariant that the flush dependency
- * feature is intended to enforce. As the metadata cache has no
- * control over the behavior of cache clients, it has no way of
+ * feature is intended to enforce. As the metadata cache has no
+ * control over the behavior of cache clients, it has no way of
* preventing this behaviour. However, it should detect it if at all
- * possible.
+ * possible.
*
* Do this by maintaining a count of the number of times each entry is
- * serialized during a cache serialization. If any flush dependency
+ * serialized during a cache serialization. If any flush dependency
* parent is serialized more than once, throw an assertion failure.
*
- * 3) An entry relocation will typically change the location of the
- * entry in the index list. This shouldn't cause problems as we
- * will scan the index list until we make a complete pass without
- * finding anything to serialize -- making relocations of either
+ * 3) An entry relocation will typically change the location of the
+ * entry in the index list. This shouldn't cause problems as we
+ * will scan the index list until we make a complete pass without
+ * finding anything to serialize -- making relocations of either
* the current or next entries irrelevant.
*
- * Note that since a relocation may result in our skipping part of
+ * Note that since a relocation may result in our skipping part of
* the index list, we must always do at least one more pass through
* the index list after an entry relocation.
*
- * 4) Changes in the flush dependency structure are possible on
+ * 4) Changes in the flush dependency structure are possible on
* entry insertion, load, expunge, evict, or remove. Destruction
- * of a flush dependency has no effect, as it can only relax the
+ * of a flush dependency has no effect, as it can only relax the
* flush dependencies. Creation of a flush dependency can create
- * an unserialized child of a flush dependency parent where all
+ * an unserialized child of a flush dependency parent where all
* flush dependency children were previously serialized. Should
* this child dirty the flush dependency parent when it is serialized,
* the parent will be re-serialized.
*
- * Per the discussion of 2) above, this is a non issue for cache
+ * Per the discussion of 2) above, this is a non-issue for cache
* serialization, and a major problem for cache flush. Using the
- * same detection mechanism, throw an assertion failure if this
- * condition appears.
+ * same detection mechanism, throw an assertion failure if this
+ * condition appears.
*
- * Observe that either eviction or removal of entries as a result of
- * a serialization is not a problem as long as the flush depencency
+ * Observe that either eviction or removal of entries as a result of
+ * a serialization is not a problem as long as the flush dependency
* tree does not change beyond the removal of a leaf.
*/
while(!done) {
@@ -8296,7 +8296,7 @@ H5C__serialize_ring(H5F_t *f, H5C_ring_t ring)
HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
/* Verify that either the entry is already serialized, or
- * that it is assigned to either the target or an inner
+ * that it is assigned to either the target or an inner
* ring.
*/
HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
@@ -8363,9 +8363,9 @@ H5C__serialize_ring(H5F_t *f, H5C_ring_t ring)
cache_ptr->entries_relocated_counter = 0;
/* At this point, all entries not marked "flush me last" and in
- * the current ring or outside it should be serialized and have up
- * to date images. Scan the index list again to serialize the
- * "flush me last" entries (if they are in the current ring) and to
+ * the current ring or outside it should be serialized and have up
+ * to date images. Scan the index list again to serialize the
+ * "flush me last" entries (if they are in the current ring) and to
* verify that all other entries have up to date images.
*/
entry_ptr = cache_ptr->il_head;
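Pulling the pieces of the algorithm described above together, the scan can be sketched as a pass-until-stable loop that restarts on removals and forces an extra pass after relocations. Invented names only; the real code additionally handles rings and "flush me last" entries.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct ser_entry {
        struct ser_entry *il_next;           /* index list linkage */
        bool              image_up_to_date;
        size_t            flush_dep_nunser_children;
    };

    struct ser_cache {
        struct ser_entry *il_head;
        int64_t entries_removed_counter;     /* bumped on expunge/evict/remove */
        int64_t entries_relocated_counter;   /* bumped on relocation */
    };

    /* Pass over the index list until a pass serializes nothing; restart a
     * pass when an entry was removed behind our back, and force one more
     * pass after any relocation, since part of the list may have been
     * skipped.
     */
    static void serialize_until_stable(struct ser_cache *c,
                                       void (*serialize)(struct ser_entry *))
    {
        bool done = false;

        while (!done) {
            struct ser_entry *e = c->il_head;

            done = true;
            c->entries_removed_counter = 0;
            c->entries_relocated_counter = 0;
            while (e != NULL) {
                if (!e->image_up_to_date && e->flush_dep_nunser_children == 0) {
                    serialize(e);            /* may mutate the index list */
                    done = false;
                }
                if (c->entries_removed_counter > 0) {
                    c->entries_removed_counter = 0;
                    e = c->il_head;          /* next pointer may dangle: restart */
                } else
                    e = e->il_next;
            }
            if (c->entries_relocated_counter > 0)
                done = false;
        }
    }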
@@ -8418,7 +8418,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C__serialize_single_entry
*
- * Purpose: Serialize the cache entry pointed to by the entry_ptr
+ * Purpose: Serialize the cache entry pointed to by the entry_ptr
* parameter.
*
* Return: Non-negative on success/Negative on failure
@@ -8483,12 +8483,12 @@ done:
* Purpose: Serialize an entry and generate its image.
*
* Note: This may cause the entry to be re-sized and/or moved in
- * the cache.
+ * the cache.
*
- * As we will not update the metadata cache's data structures
- * until we we finish the write, we must touch up these
- * data structures for size and location changes even if we
- * are about to delete the entry from the cache (i.e. on a
+ * As we will not update the metadata cache's data structures
+ * until we finish the write, we must touch up these
+ * data structures for size and location changes even if we
+ * are about to delete the entry from the cache (i.e. on a
* flush destroy).
*
* Return: Non-negative on success/Negative on failure
@@ -8524,7 +8524,7 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
old_addr = entry_ptr->addr;
/* Call client's pre-serialize callback, if there's one */
- if(entry_ptr->type->pre_serialize &&
+ if(entry_ptr->type->pre_serialize &&
(entry_ptr->type->pre_serialize)(f, (void *)entry_ptr,
entry_ptr->addr, entry_ptr->size, &new_addr, &new_len, &serialize_flags) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to pre-serialize entry")
@@ -8592,7 +8592,7 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
*/
H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);
- /* As we haven't updated the cache data structures for
+ * As we haven't updated the cache data structures
* for the flush or flush destroy yet, the entry should
* be in the slist. Thus update it for the size change.
*/
@@ -8604,8 +8604,8 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
entry_ptr->size = new_len;
} /* end if */
- /* If required, udate the entry and the cache data structures
- * for a move
+ /* If required, update the entry and the cache data structures
+ * for a move
*/
if(serialize_flags & H5C__SERIALIZE_MOVED_FLAG) {
/* Update stats and entries relocated counter */
@@ -8637,7 +8637,7 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
entry_ptr->image_up_to_date = TRUE;
- /* Propagate the fact that the entry is serialized up the
+ /* Propagate the fact that the entry is serialized up the
* flush dependency chain if appropriate. Since the image must
* have been out of date for this function to have been called
* (see assertion on entry), no need to check that -- only check
@@ -8762,8 +8762,8 @@ H5C_remove_entry(void *_entry)
if(entry == cache->entry_watched_for_removal)
cache->entry_watched_for_removal = NULL;
- /* Internal cache data structures should now be up to date, and
- * consistent with the status of the entry.
+ /* Internal cache data structures should now be up to date, and
+ * consistent with the status of the entry.
*
* Now clean up internal cache fields if appropriate.
*/
diff --git a/src/H5CX.c b/src/H5CX.c
index 5ba9bd8..a910c33 100644
--- a/src/H5CX.c
+++ b/src/H5CX.c
@@ -694,7 +694,7 @@ H5CX__get_context(void)
/* No associated value with current thread - create one */
#ifdef H5_HAVE_WIN_THREADS
/* Win32 has to use LocalAlloc to match the LocalFree in DllMain */
- ctx = (H5CX_node_t **)LocalAlloc(LPTR, sizeof(H5CX_node_t *));
+ ctx = (H5CX_node_t **)LocalAlloc(LPTR, sizeof(H5CX_node_t *));
#else
/* Use HDmalloc here since this has to match the HDfree in the
* destructor and we want to avoid the codestack there.
@@ -1375,7 +1375,7 @@ H5CX_get_mpi_file_flushing(void)
/*-------------------------------------------------------------------------
- * Function: H5CX_get_mpio_rank0_bcast
+ * Function: H5CX_get_mpio_rank0_bcast
*
* Purpose: Retrieves whether the dataset meets the read-with-rank0-and-bcast requirements for the current API call context.
*
@@ -2711,7 +2711,7 @@ H5CX_set_mpio_actual_chunk_opt(H5D_mpio_actual_chunk_opt_mode_t mpio_actual_chun
/* Sanity checks */
HDassert(head && *head);
- HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT ||
+ HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT ||
(*head)->ctx.dxpl_id == H5P_DATASET_XFER_DEFAULT));
/* Cache the value for later, marking it to set in DXPL when context popped */
@@ -2743,7 +2743,7 @@ H5CX_set_mpio_actual_io_mode(H5D_mpio_actual_io_mode_t mpio_actual_io_mode)
/* Sanity checks */
HDassert(head && *head);
- HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT ||
+ HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT ||
(*head)->ctx.dxpl_id == H5P_DATASET_XFER_DEFAULT));
/* Cache the value for later, marking it to set in DXPL when context popped */
@@ -2847,7 +2847,7 @@ H5CX_test_set_mpio_coll_chunk_link_hard(int mpio_coll_chunk_link_hard)
/* Sanity checks */
HDassert(head && *head);
- HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT ||
+ HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT ||
(*head)->ctx.dxpl_id == H5P_DATASET_XFER_DEFAULT));
H5CX_TEST_SET_PROP(H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, mpio_coll_chunk_link_hard)
@@ -2881,7 +2881,7 @@ H5CX_test_set_mpio_coll_chunk_multi_hard(int mpio_coll_chunk_multi_hard)
/* Sanity checks */
HDassert(head && *head);
- HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT ||
+ HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT ||
(*head)->ctx.dxpl_id == H5P_DATASET_XFER_DEFAULT));
H5CX_TEST_SET_PROP(H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, mpio_coll_chunk_multi_hard)
@@ -2915,7 +2915,7 @@ H5CX_test_set_mpio_coll_chunk_link_num_true(int mpio_coll_chunk_link_num_true)
/* Sanity checks */
HDassert(head && *head);
- HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT ||
+ HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT ||
(*head)->ctx.dxpl_id == H5P_DATASET_XFER_DEFAULT));
H5CX_TEST_SET_PROP(H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, mpio_coll_chunk_link_num_true)
@@ -2949,7 +2949,7 @@ H5CX_test_set_mpio_coll_chunk_link_num_false(int mpio_coll_chunk_link_num_false)
/* Sanity checks */
HDassert(head && *head);
- HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT ||
+ HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT ||
(*head)->ctx.dxpl_id == H5P_DATASET_XFER_DEFAULT));
H5CX_TEST_SET_PROP(H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, mpio_coll_chunk_link_num_false)
@@ -2983,7 +2983,7 @@ H5CX_test_set_mpio_coll_chunk_multi_ratio_coll(int mpio_coll_chunk_multi_ratio_c
/* Sanity checks */
HDassert(head && *head);
- HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT ||
+ HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT ||
(*head)->ctx.dxpl_id == H5P_DATASET_XFER_DEFAULT));
H5CX_TEST_SET_PROP(H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, mpio_coll_chunk_multi_ratio_coll)
@@ -3017,7 +3017,7 @@ H5CX_test_set_mpio_coll_chunk_multi_ratio_ind(int mpio_coll_chunk_multi_ratio_in
/* Sanity checks */
HDassert(head && *head);
- HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT ||
+ HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT ||
(*head)->ctx.dxpl_id == H5P_DATASET_XFER_DEFAULT));
H5CX_TEST_SET_PROP(H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, mpio_coll_chunk_multi_ratio_ind)
@@ -3051,7 +3051,7 @@ H5CX_test_set_mpio_coll_rank0_bcast(hbool_t mpio_coll_rank0_bcast)
/* Sanity checks */
HDassert(head && *head);
- HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT ||
+ HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT ||
(*head)->ctx.dxpl_id == H5P_DATASET_XFER_DEFAULT));
H5CX_TEST_SET_PROP(H5D_XFER_COLL_RANK0_BCAST_NAME, mpio_coll_rank0_bcast)
diff --git a/src/H5Cdbg.c b/src/H5Cdbg.c
index cb1d0e2..d5599f2 100644
--- a/src/H5Cdbg.c
+++ b/src/H5Cdbg.c
@@ -185,7 +185,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C_dump_cache_LRU
*
- * Purpose: Print a summary of the contents of the metadata cache
+ * Purpose: Print a summary of the contents of the metadata cache
* LRU for debugging purposes.
*
* Return: Non-negative on success/Negative on failure
@@ -209,9 +209,9 @@ H5C_dump_cache_LRU(H5C_t *cache_ptr, const char *cache_name)
HDassert(cache_name != NULL );
HDfprintf(stdout, "\n\nDump of metadata cache LRU \"%s\"\n", cache_name);
- HDfprintf(stdout, "LRU len = %d, LRU size = %d\n",
+ HDfprintf(stdout, "LRU len = %d, LRU size = %d\n",
cache_ptr->LRU_list_len, (int)(cache_ptr->LRU_list_size));
- HDfprintf(stdout, "index_size = %d, max_cache_size = %d, delta = %d\n\n",
+ HDfprintf(stdout, "index_size = %d, max_cache_size = %d, delta = %d\n\n",
(int)(cache_ptr->index_size), (int)(cache_ptr->max_cache_size),
(int)(cache_ptr->max_cache_size) - (int)(cache_ptr->index_size));
@@ -238,12 +238,12 @@ H5C_dump_cache_LRU(H5C_t *cache_ptr, const char *cache_name)
if(NULL == entry_ptr->tag_info)
HDfprintf(stdout, " %16s ", "N/A");
else
- HDfprintf(stdout, " 0x%16llx ",
+ HDfprintf(stdout, " 0x%16llx ",
(long long)(entry_ptr->tag_info->tag));
HDfprintf(stdout, " %5lld ", (long long)(entry_ptr->size));
HDfprintf(stdout, " %d ", (int)(entry_ptr->ring));
- HDfprintf(stdout, " %2d %-32s ", (int)(entry_ptr->type->id),
+ HDfprintf(stdout, " %2d %-32s ", (int)(entry_ptr->type->id),
(entry_ptr->type->name));
HDfprintf(stdout, " %d", (int)(entry_ptr->is_dirty));
HDfprintf(stdout, "\n");
@@ -262,8 +262,8 @@ H5C_dump_cache_LRU(H5C_t *cache_ptr, const char *cache_name)
/*-------------------------------------------------------------------------
* Function: H5C_dump_cache_skip_list
*
- * Purpose: Debugging routine that prints a summary of the contents of
- * the skip list used by the metadata cache metadata cache to
+ * Purpose: Debugging routine that prints a summary of the contents of
+ * the skip list used by the metadata cache to
* maintain an address sorted list of dirty entries.
*
* Return: Non-negative on success/Negative on failure
@@ -343,9 +343,9 @@ H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn)
/*-------------------------------------------------------------------------
* Function: H5C_dump_coll_write_list
*
- * Purpose: Debugging routine that prints a summary of the contents of
- * the collective write skip list used by the metadata cache
- * in the parallel case to maintain a list of entries to write
+ * Purpose: Debugging routine that prints a summary of the contents of
+ * the collective write skip list used by the metadata cache
+ * in the parallel case to maintain a list of entries to write
* collectively at a sync point.
*
* Return: Non-negative on success/Negative on failure
@@ -381,7 +381,7 @@ H5C_dump_coll_write_list(H5C_t * cache_ptr, char * calling_fcn)
list_len = (int)H5SL_count(cache_ptr->coll_write_list);
- HDfprintf(stdout, "\n\nDumping MDC coll write list from %d:%s.\n",
+ HDfprintf(stdout, "\n\nDumping MDC coll write list from %d:%s.\n",
aux_ptr->mpi_rank, calling_fcn);
HDfprintf(stdout, " slist len = %u.\n", cache_ptr->slist_len);
@@ -773,7 +773,7 @@ H5C_stats(H5C_t * cache_ptr,
(((double)(cache_ptr->total_dirty_pf_entries_skipped_in_msic)) /
((double)(cache_ptr->calls_to_msic)));
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s MSIC: Average/max dirty pf entries skipped = %lf / %ld\n",
cache_ptr->prefix,
average_dirty_pf_entries_skipped_per_call_to_msic,
@@ -798,9 +798,9 @@ H5C_stats(H5C_t * cache_ptr,
(long long)(cache_ptr->total_entries_scanned_in_msic -
cache_ptr->entries_scanned_to_make_space));
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s slist/LRU/index scan restarts = %lld / %lld / %lld.\n",
- cache_ptr->prefix,
+ cache_ptr->prefix,
(long long)(cache_ptr->slist_scan_restarts),
(long long)(cache_ptr->LRU_scan_restarts),
(long long)(cache_ptr->index_scan_restarts));
@@ -827,7 +827,7 @@ H5C_stats(H5C_t * cache_ptr,
(long long)(cache_ptr->evictions[H5AC_PREFETCHED_ENTRY_ID]));
if(cache_ptr->prefetches > 0)
- prefetch_use_rate =
+ prefetch_use_rate =
(double)100.0f * ((double)(cache_ptr->prefetch_hits)) /
((double)(cache_ptr->prefetches));
else
@@ -886,7 +886,7 @@ H5C_stats(H5C_t * cache_ptr,
(int)(cache_ptr->max_read_protects[i]));
HDfprintf(stdout,
- "%s clears / flushes = %ld / %ld\n",
+ "%s clears / flushes = %ld / %ld\n",
cache_ptr->prefix,
(long)(cache_ptr->clears[i]),
(long)(cache_ptr->flushes[i]));
@@ -1158,21 +1158,21 @@ H5C__dump_entry(H5C_t *cache_ptr, const H5C_cache_entry_t *entry_ptr,
/*-------------------------------------------------------------------------
* Function: H5C_flush_dependency_exists()
*
- * Purpose: Test to see if a flush dependency relationship exists
- * between the supplied parent and child. Both parties
+ * Purpose: Test to see if a flush dependency relationship exists
+ * between the supplied parent and child. Both parties
* are indicated by addresses so as to avoid the necessity
- * of protect / unprotect calls prior to this call.
+ * of protect / unprotect calls prior to this call.
*
- * If either the parent or the child is not in the metadata
+ * If either the parent or the child is not in the metadata
* cache, the function sets *fd_exists_ptr to FALSE.
*
- * If both are in the cache, the childs list of parents is
+ * If both are in the cache, the child's list of parents is
* searched for the proposed parent. If the proposed parent
* is found in the child's parent list, the function sets
- * *fd_exists_ptr to TRUE. In all other non-error cases,
+ * *fd_exists_ptr to TRUE. In all other non-error cases,
* the function sets *fd_exists_ptr to FALSE.
*
- * Return: SUCCEED on success/FAIL on failure. Note that
+ * Return: SUCCEED on success/FAIL on failure. Note that
* *fd_exists_ptr is undefined on failure.
*
* Programmer: John Mainzer
@@ -1355,30 +1355,30 @@ done:
*
* Function: H5C_get_entry_ptr_from_addr()
*
- * Purpose: Debugging function that attempts to look up an entry in the
- * cache by its file address, and if found, returns a pointer
- * to the entry in *entry_ptr_ptr. If the entry is not in the
+ * Purpose: Debugging function that attempts to look up an entry in the
+ * cache by its file address, and if found, returns a pointer
+ * to the entry in *entry_ptr_ptr. If the entry is not in the
* cache, *entry_ptr_ptr is set to NULL.
*
- * WARNING: This call should be used only in debugging
- * routines, and it should be avoided when
+ * WARNING: This call should be used only in debugging
+ * routines, and it should be avoided when
* possible.
*
- * Further, if we ever multi-thread the cache,
- * this routine will have to be either discarded
+ * Further, if we ever multi-thread the cache,
+ * this routine will have to be either discarded
* or heavily re-worked.
*
- * Finally, keep in mind that the entry whose
- * pointer is obtained in this fashion may not
- * be in a stable state.
+ * Finally, keep in mind that the entry whose
+ * pointer is obtained in this fashion may not
+ * be in a stable state.
*
* Note that this function is only defined if NDEBUG
* is not defined.
*
- * As heavy use of this function is almost certainly a
- * bad idea, the metadata cache tracks the number of
- * successful calls to this function, and (if
- * H5C_DO_SANITY_CHECKS is defined) displays any
+ * As heavy use of this function is almost certainly a
+ * bad idea, the metadata cache tracks the number of
+ * successful calls to this function, and (if
+ * H5C_DO_SANITY_CHECKS is defined) displays any
* non-zero count on cache shutdown.
*
* Return: FAIL if error is detected, SUCCEED otherwise.
@@ -1425,7 +1425,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C_get_serialization_in_progress
*
- * Purpose: Return the current value of
+ * Purpose: Return the current value of
* cache_ptr->serialization_in_progress.
*
* Return: Current value of cache_ptr->serialization_in_progress.
@@ -1454,7 +1454,7 @@ H5C_get_serialization_in_progress(const H5C_t *cache_ptr)
*
* Function: H5C_cache_is_clean()
*
- * Purpose: Debugging function that verifies that all rings in the
+ * Purpose: Debugging function that verifies that all rings in the
* metadata cache are clean from the outermost ring, inwards
* to the inner ring specified.
*
@@ -1499,16 +1499,16 @@ done:
*
* Function: H5C_verify_entry_type()
*
- * Purpose: Debugging function that attempts to look up an entry in the
+ * Purpose: Debugging function that attempts to look up an entry in the
* cache by its file address, and if found, tests to see if its
* type field contains the expected value.
*
* If the specified entry is in cache, *in_cache_ptr is set
- * to TRUE, and *type_ok_ptr is set to TRUE or FALSE depending
- * on whether the entries type field matches the expected_type
+ * to TRUE, and *type_ok_ptr is set to TRUE or FALSE depending
+ * on whether the entry's type field matches the expected_type
* parameter.
*
- * If the target entry is not in cache, *in_cache_ptr is
+ * If the target entry is not in cache, *in_cache_ptr is
* set to FALSE, and *type_ok_ptr is undefined.
*
* Note that this function is only defined if NDEBUG
diff --git a/src/H5Cepoch.c b/src/H5Cepoch.c
index 8bcab9f..6451019 100644
--- a/src/H5Cepoch.c
+++ b/src/H5Cepoch.c
@@ -58,7 +58,7 @@ static herr_t H5C__epoch_marker_get_initial_load_size(void *udata_ptr,
size_t *image_len_ptr);
static herr_t H5C__epoch_marker_get_final_load_size(const void *image_ptr,
size_t image_len_ptr, void *udata_ptr, size_t *actual_len);
-static htri_t H5C__epoch_marker_verify_chksum(const void *image_ptr,
+static htri_t H5C__epoch_marker_verify_chksum(const void *image_ptr,
size_t len, void *udata_ptr);
static void * H5C__epoch_marker_deserialize(const void * image_ptr,
size_t len, void * udata, hbool_t * dirty_ptr);
@@ -71,7 +71,7 @@ static herr_t H5C__epoch_marker_serialize(const H5F_t *f,
void * image_ptr, size_t len, void * thing);
static herr_t H5C__epoch_marker_notify(H5C_notify_action_t action, void *thing);
static herr_t H5C__epoch_marker_free_icr(void * thing);
-static herr_t H5C__epoch_marker_fsf_size(const void H5_ATTR_UNUSED * thing,
+static herr_t H5C__epoch_marker_fsf_size(const void H5_ATTR_UNUSED * thing,
hsize_t H5_ATTR_UNUSED * fsf_size_ptr);
@@ -143,8 +143,8 @@ H5C__epoch_marker_get_final_load_size(const void H5_ATTR_UNUSED *image_ptr,
} /* end H5C__epoch_marker_final_get_load_size() */
-static htri_t
-H5C__epoch_marker_verify_chksum(const void H5_ATTR_UNUSED *image_ptr, size_t H5_ATTR_UNUSED len,
+static htri_t
+H5C__epoch_marker_verify_chksum(const void H5_ATTR_UNUSED *image_ptr, size_t H5_ATTR_UNUSED len,
void H5_ATTR_UNUSED *udata_ptr)
{
FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
@@ -228,7 +228,7 @@ H5C__epoch_marker_free_icr(void H5_ATTR_UNUSED * thing)
} /* end H5C__epoch_marker_free_icr() */
-static herr_t
+static herr_t
H5C__epoch_marker_fsf_size(const void H5_ATTR_UNUSED * thing, hsize_t H5_ATTR_UNUSED *fsf_size_ptr)
{
FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
diff --git a/src/H5Cimage.c b/src/H5Cimage.c
index de6e660..2139d81 100644
--- a/src/H5Cimage.c
+++ b/src/H5Cimage.c
@@ -102,11 +102,11 @@ static herr_t H5C__decode_cache_image_header(const H5F_t *f,
static herr_t H5C__decode_cache_image_entry(const H5F_t *f,
const H5C_t *cache_ptr, const uint8_t **buf, unsigned entry_num);
#endif /* NDEBUG */ /* only used in assertions */
-static herr_t H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr,
+static herr_t H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr,
H5C_cache_entry_t *pf_entry_ptr, H5C_cache_entry_t **fd_children);
static herr_t H5C__encode_cache_image_header(const H5F_t *f,
const H5C_t *cache_ptr, uint8_t **buf);
-static herr_t H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr,
+static herr_t H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr,
uint8_t **buf, unsigned entry_num);
static herr_t H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr);
static void H5C__prep_for_file_close__compute_fd_heights_real(
@@ -146,8 +146,8 @@ H5FL_DEFINE(H5C_cache_entry_t);
*
* Function: H5C_cache_image_pending()
*
- * Purpose: Tests to see if the load of a metadata cache image
- * load is pending (i.e. will be executed on the next
+ * Purpose: Tests to see if the load of a metadata cache image
+ * is pending (i.e. will be executed on the next
* protect or insert)
*
* Returns TRUE if a cache image load is pending, and FALSE
@@ -179,16 +179,16 @@ H5C_cache_image_pending(const H5C_t *cache_ptr)
/*-------------------------------------------------------------------------
* Function: H5C_cache_image_status()
*
- * Purpose: Examine the metadata cache associated with the supplied
- * instance of H5F_t to determine whether the load of a
- * cache image has either been queued or executed, and if
+ * Purpose: Examine the metadata cache associated with the supplied
+ * instance of H5F_t to determine whether the load of a
+ * cache image has either been queued or executed, and if
* construction of a cache image has been requested.
*
* This done, it sets *load_ci_ptr to TRUE if a cache image
* has either been loaded or a load has been requested, and
* to FALSE otherwise.
*
- * Similarly, set *write_ci_ptr to TRUE if construction of
+ * Similarly, set *write_ci_ptr to TRUE if construction of
* a cache image has been requested, and to FALSE otherwise.
*
* Return: SUCCEED on success, and FAIL on failure.
@@ -213,7 +213,7 @@ H5C_cache_image_status(H5F_t * f, hbool_t *load_ci_ptr, hbool_t *write_ci_ptr)
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(load_ci_ptr);
HDassert(write_ci_ptr);
-
+
*load_ci_ptr = cache_ptr->load_image || cache_ptr->image_loaded;
*write_ci_ptr = cache_ptr->image_ctl.generate_image;
@@ -224,7 +224,7 @@ H5C_cache_image_status(H5F_t * f, hbool_t *load_ci_ptr, hbool_t *write_ci_ptr)
/*-------------------------------------------------------------------------
* Function: H5C__construct_cache_image_buffer()
*
- * Purpose: Allocate a buffer of size cache_ptr->image_len, and
+ * Purpose: Allocate a buffer of size cache_ptr->image_len, and
* load it with an image of the metadata cache image block.
*
* Note that by the time this function is called, the cache
@@ -330,7 +330,7 @@ H5C__construct_cache_image_buffer(H5F_t * f, H5C_t *cache_ptr)
HDassert((cache_ptr->image_entries)[u].type_id == (fake_cache_ptr->image_entries)[u].type_id);
HDassert((cache_ptr->image_entries)[u].lru_rank == (fake_cache_ptr->image_entries)[u].lru_rank);
HDassert((cache_ptr->image_entries)[u].is_dirty == (fake_cache_ptr->image_entries)[u].is_dirty);
- /* don't check image_fd_height as it is not stored in
+ /* don't check image_fd_height as it is not stored in
* the metadata cache image block.
*/
HDassert((cache_ptr->image_entries)[u].fd_child_count == (fake_cache_ptr->image_entries)[u].fd_child_count);
@@ -346,7 +346,7 @@ H5C__construct_cache_image_buffer(H5F_t * f, H5C_t *cache_ptr)
(fake_cache_ptr->image_entries)[u].fd_parent_addrs = (haddr_t *)H5MM_xfree((fake_cache_ptr->image_entries)[u].fd_parent_addrs);
(fake_cache_ptr->image_entries)[u].fd_parent_count = 0;
} /* end if */
- else
+ else
HDassert((fake_cache_ptr->image_entries)[u].fd_parent_count == 0);
HDassert((cache_ptr->image_entries)[u].image_ptr);
@@ -431,28 +431,28 @@ done:
* Function: H5C__deserialize_prefetched_entry()
*
* Purpose: Deserialize the supplied prefetched entry, and return
- * a pointer to the deserialized entry in *entry_ptr_ptr.
+ * a pointer to the deserialized entry in *entry_ptr_ptr.
* If successful, remove the prefetched entry from the cache,
* and free it. Insert the deserialized entry into the cache.
*
- * Note that the on disk image of the entry is not freed --
+ * Note that the on disk image of the entry is not freed --
* a pointer to it is stored in the deserialized entries'
* image_ptr field, and its image_up_to_date field is set to
* TRUE unless the entry is dirtied by the deserialize call.
*
* If the prefetched entry is a flush dependency child,
- * destroy that flush dependency prior to calling the
+ * destroy that flush dependency prior to calling the
* deserialize callback. If appropriate, the flush dependency
* relationship will be recreated by the cache client.
*
* If the prefetched entry is a flush dependency parent,
- * destroy the flush dependency relationship with all its
+ * destroy the flush dependency relationship with all its
* children. As all these children must be prefetched entries,
- * recreate these flush dependency relationships with
+ * recreate these flush dependency relationships with the
* deserialized entry after it is inserted in the cache.
*
- * Since deserializing a prefetched entry is semantically
- * equivalent to a load, issue an entry loaded nofification
+ * Since deserializing a prefetched entry is semantically
+ * equivalent to a load, issue an entry loaded notification
* if the notify callback is defined.
*
* Return: SUCCEED on success, and FAIL on failure.
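The sequence described in this comment block, reduced to an outline; every helper below is an invented placeholder for the corresponding cache operation, shown only to make the ordering of the steps explicit.

    #include <stddef.h>

    struct pf_entry;

    /* Placeholders for the corresponding cache operations */
    extern void   destroy_parent_deps(struct pf_entry *pf);
    extern size_t save_children(struct pf_entry *pf, struct pf_entry ***children_out);
    extern void  *deserialize_image(struct pf_entry *pf, int *dirty_out);
    extern void   evict_prefetched(struct pf_entry *pf);
    extern struct pf_entry *insert_deserialized(void *thing);
    extern void   recreate_child_dep(struct pf_entry *parent, struct pf_entry *child);
    extern void   notify_loaded(struct pf_entry *e);

    static struct pf_entry *deserialize_prefetched(struct pf_entry *pf)
    {
        struct pf_entry **children = NULL;
        size_t            nchildren;
        int               dirty = 0;
        void             *thing;
        struct pf_entry  *ds;

        destroy_parent_deps(pf);                  /* clients may recreate these */
        nchildren = save_children(pf, &children); /* children re-parented below */
        thing = deserialize_image(pf, &dirty);    /* on-disk image -> native form */
        evict_prefetched(pf);                     /* image buffer lives on in 'thing' */
        ds = insert_deserialized(thing);
        for (size_t i = 0; i < nchildren; i++)
            recreate_child_dep(ds, children[i]);  /* transfer deps to the new entry */
        notify_loaded(ds);                        /* semantically a load */
        return ds;
    }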
@@ -468,22 +468,22 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr,
H5C_cache_entry_t **entry_ptr_ptr, const H5C_class_t *type,
haddr_t addr, void *udata)
{
- hbool_t dirty = FALSE; /* Flag indicating whether thing was
- * dirtied during deserialize
+ hbool_t dirty = FALSE; /* Flag indicating whether thing was
+ * dirtied during deserialize
*/
size_t len; /* Size of image in file */
void * thing = NULL; /* Pointer to thing loaded */
H5C_cache_entry_t * pf_entry_ptr; /* pointer to the prefetched entry */
/* supplied in *entry_ptr_ptr. */
- H5C_cache_entry_t * ds_entry_ptr; /* Alias for thing loaded, as cache
- * entry
+ H5C_cache_entry_t * ds_entry_ptr; /* Alias for thing loaded, as cache
+ * entry
*/
H5C_cache_entry_t** fd_children = NULL; /* Pointer to a dynamically */
/* allocated array of pointers to */
/* the flush dependency children of */
/* the prefetched entry, or NULL if */
/* that array does not exist. */
- unsigned flush_flags = (H5C__FLUSH_INVALIDATE_FLAG |
+ unsigned flush_flags = (H5C__FLUSH_INVALIDATE_FLAG |
H5C__FLUSH_CLEAR_ONLY_FLAG);
int i;
herr_t ret_value = SUCCEED; /* Return value */
@@ -513,8 +513,8 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr,
/* verify absence of prohibited or unsupported type flag combinations */
HDassert(!(type->flags & H5C__CLASS_SKIP_READS));
-
- /* Can't see how skip reads could be usefully combined with
+
+ /* Can't see how skip reads could be usefully combined with
 *		the speculative read flag.  Hence disallow.
*/
HDassert(!((type->flags & H5C__CLASS_SKIP_READS) &&
@@ -535,7 +535,7 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr,
HDassert(pf_entry_ptr->flush_dep_parent[i]->flush_dep_nchildren > 0);
HDassert(pf_entry_ptr->fd_parent_addrs);
HDassert(pf_entry_ptr->flush_dep_parent[i]->addr == pf_entry_ptr->fd_parent_addrs[i]);
-
+
if(H5C_destroy_flush_dependency(pf_entry_ptr->flush_dep_parent[i], pf_entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "can't destroy pf entry parent flush dependency")
@@ -543,11 +543,11 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr,
} /* end for */
HDassert(pf_entry_ptr->flush_dep_nparents == 0);
- /* If *pf_entry_ptr is a flush dependency parent, destroy its flush
- * dependency relationships with all its children (which must be
+ /* If *pf_entry_ptr is a flush dependency parent, destroy its flush
+ * dependency relationships with all its children (which must be
* prefetched entries as well).
*
- * These flush dependency relationships will have to be restored
+ * These flush dependency relationships will have to be restored
* after the deserialized entry is inserted into the cache in order
* to transfer these relationships to the new entry. Hence save the
 *		pointers to the flush dependency children of *pf_entry_ptr for later
@@ -561,16 +561,16 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr,
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "can't destroy pf entry child flush dependency(s).")
} /* end if */
- /* Since the size of the on disk image is known exactly, there is
- * no need for either a call to the get_initial_load_size() callback,
+ /* Since the size of the on disk image is known exactly, there is
+ * no need for either a call to the get_initial_load_size() callback,
* or retries if the H5C__CLASS_SPECULATIVE_LOAD_FLAG flag is set.
* Similarly, there is no need to clamp possible reads beyond
* EOF.
*/
len = pf_entry_ptr->size;
- /* Deserialize the prefetched on-disk image of the entry into the
- * native memory form
+ /* Deserialize the prefetched on-disk image of the entry into the
+ * native memory form
*/
if(NULL == (thing = type->deserialize(pf_entry_ptr->image_ptr, len, udata, &dirty)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "Can't deserialize image")
@@ -587,14 +587,14 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr,
*
* HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) );
*
- * note that type ids 5 & 6 are associated with object headers in the
+ * note that type ids 5 & 6 are associated with object headers in the
* metadata cache.
*
* When we get to using H5C for other purposes, we may wish to
* tighten up the assert so that the loophole only applies to the
* metadata cache.
*
- * Note that at present, dirty can't be set to true with prefetched
+ * Note that at present, dirty can't be set to true with prefetched
* entries. However this may change, so include this functionality
 *		against that possibility.
*
@@ -682,8 +682,8 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr,
/* We have successfully deserialized the prefetched entry.
*
* Before we return a pointer to the deserialized entry, we must remove
- * the prefetched entry from the cache, discard it, and replace it with
- * the deserialized entry. Note that we do not free the prefetched
+ * the prefetched entry from the cache, discard it, and replace it with
+ * the deserialized entry. Note that we do not free the prefetched
 *		entry's image, as that has been transferred to the deserialized
* entry.
*
@@ -695,11 +695,11 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr,
* 1) Set pf_entry_ptr->image_ptr to NULL. Since we have already
* transferred the buffer containing the image to *ds_entry_ptr,
* this is not a memory leak.
- *
+ *
* 2) Call H5C__flush_single_entry() with the H5C__FLUSH_INVALIDATE_FLAG
* and H5C__FLUSH_CLEAR_ONLY_FLAG flags set.
*/
- pf_entry_ptr->image_ptr = NULL;
+ pf_entry_ptr->image_ptr = NULL;
if(pf_entry_ptr->is_dirty) {
HDassert(pf_entry_ptr->in_slist);
flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;
@@ -723,7 +723,7 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr,
H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, ds_entry_ptr, FAIL)
- /* Deserializing a prefetched entry is the conceptual equivalent of
+ /* Deserializing a prefetched entry is the conceptual equivalent of
* loading it from file. If the deserialized entry has a notify callback,
* send an "after load" notice now that the deserialized entry is fully
* integrated into the cache.
@@ -732,9 +732,9 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr,
(ds_entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_LOAD, ds_entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry loaded into cache")
- /* Restore flush dependencies with the flush dependency children of
- * of the prefetched entry. Note that we must protect *ds_entry_ptr
- * before the call to avoid triggering sanity check failures, and
+ /* Restore flush dependencies with the flush dependency children of
+ *		the prefetched entry.  Note that we must protect *ds_entry_ptr
+ * before the call to avoid triggering sanity check failures, and
* then unprotect it afterwards.
*/
i = 0;
@@ -798,8 +798,8 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C__free_image_entries_array
*
- * Purpose: If the image entries array exists, free the image
- * associated with each entry, and then free the image
+ * Purpose: If the image entries array exists, free the image
+ * associated with each entry, and then free the image
* entries array proper.
*
* Note that by the time this function is called, the cache
@@ -834,7 +834,7 @@ H5C__free_image_entries_array(H5C_t * cache_ptr)
/* Get pointer to image entry */
ie_ptr = &((cache_ptr->image_entries)[u]);
- /* Sanity checks */
+ /* Sanity checks */
HDassert(ie_ptr);
HDassert(ie_ptr->magic == H5C_IMAGE_ENTRY_T_MAGIC);
HDassert(ie_ptr->image_ptr);
@@ -866,14 +866,14 @@ H5C__free_image_entries_array(H5C_t * cache_ptr)
/*-------------------------------------------------------------------------
* Function: H5C_force_cache_image_load()
*
- * Purpose: On rare occasions, it is necessary to run
+ * Purpose: On rare occasions, it is necessary to run
* H5MF_tidy_self_referential_fsm_hack() prior to the first
- * metadata cache access. This is a problem as if there is a
- * cache image at the end of the file, that routine will
+ *		metadata cache access.  This is a problem because, if there is a
+ * cache image at the end of the file, that routine will
* discard it.
*
* We solve this issue by calling this function, which will
- * load the cache image and then call
+ * load the cache image and then call
* H5MF_tidy_self_referential_fsm_hack() to discard it.
*
* Return: SUCCEED on success, and FAIL on failure.
@@ -1053,12 +1053,12 @@ H5C__read_cache_image(H5F_t *f, H5C_t *cache_ptr)
if ( ( NULL == aux_ptr ) || ( aux_ptr->mpi_rank == 0 ) ) {
- HDassert((NULL == aux_ptr) ||
+ HDassert((NULL == aux_ptr) ||
(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC));
#endif /* H5_HAVE_PARALLEL */
/* Read the buffer (if serial access, or rank 0 of parallel access) */
- if(H5F_block_read(f, H5FD_MEM_SUPER, cache_ptr->image_addr,
+ if(H5F_block_read(f, H5FD_MEM_SUPER, cache_ptr->image_addr,
cache_ptr->image_len, cache_ptr->image_buffer) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_READERROR, FAIL, "Can't read metadata cache image block")
@@ -1068,9 +1068,9 @@ H5C__read_cache_image(H5F_t *f, H5C_t *cache_ptr)
if ( aux_ptr ) {
/* Broadcast cache image */
- if ( MPI_SUCCESS !=
- (mpi_result = MPI_Bcast(cache_ptr->image_buffer,
- (int)cache_ptr->image_len, MPI_BYTE,
+ if ( MPI_SUCCESS !=
+ (mpi_result = MPI_Bcast(cache_ptr->image_buffer,
+ (int)cache_ptr->image_len, MPI_BYTE,
0, aux_ptr->mpi_comm)) )
HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
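/* Sketch of the read-then-broadcast pattern shown above, assuming MPI.
 * read_image_block() is a hypothetical stand-in for the rank-0 file read;
 * only MPI_Bcast() is a real MPI call.
 */
#include <mpi.h>
#include <stddef.h>

extern void read_image_block(void *buf, size_t len);    /* hypothetical */

static void
sketch_bcast_image(void *buf, size_t len, int mpi_rank, MPI_Comm comm)
{
    if (0 == mpi_rank)
        read_image_block(buf, len);     /* only rank 0 touches the file */

    /* every rank participates; non-zero ranks receive the image */
    MPI_Bcast(buf, (int)len, MPI_BYTE, 0, comm);
}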
@@ -1080,9 +1080,9 @@ H5C__read_cache_image(H5F_t *f, H5C_t *cache_ptr)
else if ( aux_ptr ) {
/* Retrieve the contents of the metadata cache image from process 0 */
- if ( MPI_SUCCESS !=
- (mpi_result = MPI_Bcast(cache_ptr->image_buffer,
- (int)cache_ptr->image_len, MPI_BYTE,
+ if ( MPI_SUCCESS !=
+ (mpi_result = MPI_Bcast(cache_ptr->image_buffer,
+ (int)cache_ptr->image_len, MPI_BYTE,
0, aux_ptr->mpi_comm)) )
HMPI_GOTO_ERROR(FAIL, "can't receive cache image MPI_Bcast", \
@@ -1128,14 +1128,14 @@ H5C__load_cache_image(H5F_t *f)
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- /* If the image address is defined, load the image, decode it,
- * and insert its contents into the metadata cache.
+ /* If the image address is defined, load the image, decode it,
+ * and insert its contents into the metadata cache.
*
- * Note that under normal operating conditions, it is an error if the
+ * Note that under normal operating conditions, it is an error if the
* image address is HADDR_UNDEF. However, to facilitate testing,
* we allow this special value of the image address which means that
- * no image exists, and that the load operation should be skipped
- * silently.
+ * no image exists, and that the load operation should be skipped
+ * silently.
*/
if(H5F_addr_defined(cache_ptr->image_addr)) {
/* Sanity checks */
@@ -1184,25 +1184,25 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C_load_cache_image_on_next_protect()
*
- * Purpose: Note the fact that a metadata cache image superblock
+ * Purpose: Note the fact that a metadata cache image superblock
* extension message exists, along with the base address
* and length of the metadata cache image block.
*
- * Once this notification is received the metadata cache
- * image block must be read, decoded, and loaded into the
+ * Once this notification is received the metadata cache
+ * image block must be read, decoded, and loaded into the
* cache on the next call to H5C_protect().
*
- * Further, if the file is opened R/W, the metadata cache
- * image superblock extension message must be deleted from
+ * Further, if the file is opened R/W, the metadata cache
+ * image superblock extension message must be deleted from
 *		the superblock extension and the image block freed.
 *
 *		Conversely, if the file is opened R/O, the metadata
* cache image superblock extension message and image block
* must be left as is. Further, any dirty entries in the
- * cache image block must be marked as clean to avoid
+ * cache image block must be marked as clean to avoid
* attempts to write them on file close.
*
- * Return: SUCCEED
+ * Return: SUCCEED
*
* Programmer: John Mainzer
* 7/6/15
@@ -1291,41 +1291,41 @@ H5C__image_entry_cmp(const void *_entry1, const void *_entry2)
/*-------------------------------------------------------------------------
* Function: H5C__prep_image_for_file_close
*
- * Purpose: The objective of the call is to allow the metadata cache
- * to do any preparatory work prior to generation of a
+ * Purpose: The objective of the call is to allow the metadata cache
+ * to do any preparatory work prior to generation of a
* cache image.
*
- * In particular, the cache must
+ * In particular, the cache must
*
* 1) serialize all its entries,
*
- * 2) compute the size of the metadata cache image,
+ * 2) compute the size of the metadata cache image,
*
* 3) allocate space for the metadata cache image, and
*
* 4) setup the metadata cache image superblock extension
- * message with the address and size of the metadata
+ * message with the address and size of the metadata
* cache image.
*
- * The parallel case is complicated by the fact that
- * while all metadata caches must contain the same set of
- * dirty entries, there is no such requirement for clean
+ * The parallel case is complicated by the fact that
+ * while all metadata caches must contain the same set of
+ * dirty entries, there is no such requirement for clean
* entries or the order that entries appear in the LRU.
*
* Thus, there is no requirement that different processes
* will construct cache images of the same size.
*
- * This is not a major issue as long as all processes include
- * the same set of dirty entries in the cache -- as they
- * currently do (note that this will change when we implement
- * the ageout feature). Since only the process zero cache
- * writes the cache image, all that is necessary is to
- * broadcast the process zero cache size for use in the
- * superblock extension messages and cache image block
+ * This is not a major issue as long as all processes include
+ * the same set of dirty entries in the cache -- as they
+ * currently do (note that this will change when we implement
+ * the ageout feature). Since only the process zero cache
+ * writes the cache image, all that is necessary is to
+ * broadcast the process zero cache size for use in the
+ * superblock extension messages and cache image block
* allocations.
*
- * Note: At present, cache image is disabled in the
- * parallel case as the new collective metadata write
+ * Note: At present, cache image is disabled in the
+ * parallel case as the new collective metadata write
* code must be modified to support cache image.
*
* Return: Non-negative on success/Negative on failure
@@ -1354,8 +1354,8 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated)
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(image_generated);
- /* If the file is opened and closed without any access to
- * any group or data set, it is possible that the cache image (if
+ /* If the file is opened and closed without any access to
+ * any group or data set, it is possible that the cache image (if
* it exists) has not been read yet. Do this now if required.
*/
if(cache_ptr->load_image) {
@@ -1365,15 +1365,15 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated)
} /* end if */
/* Before we start to generate the cache image (if requested), verify
- * that the superblock supports superblock extension messages, and
+ * that the superblock supports superblock extension messages, and
* silently cancel any request for a cache image if it does not.
*
* Ideally, we would do this when the cache image is requested,
- * but the necessary information is not necessary available at that
+ *		but the necessary information is not necessarily available at that
* time -- hence this last minute check.
*
- * Note that under some error conditions, the superblock will be
- * undefined in this case as well -- if so, assume that the
+ * Note that under some error conditions, the superblock will be
+ * undefined in this case as well -- if so, assume that the
* superblock does not support superblock extension messages.
* Also verify that the file's high_bound is at least release
* 1.10.x, otherwise cancel the request for a cache image
@@ -1390,17 +1390,17 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated)
/* Generate the cache image, if requested */
if(cache_ptr->image_ctl.generate_image) {
/* Create the cache image super block extension message.
- *
+ *
* Note that the base address and length of the metadata cache
* image are undefined at this point, and thus will have to be
* updated later.
*
- * Create the super block extension message now so that space
+ * Create the super block extension message now so that space
* is allocated for it (if necessary) before we allocate space
* for the cache image block.
*
- * To simplify testing, do this only if the
- * H5C_CI__GEN_MDCI_SBE_MESG bit is set in
+ * To simplify testing, do this only if the
+ * H5C_CI__GEN_MDCI_SBE_MESG bit is set in
* cache_ptr->image_ctl.flags.
*/
if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDCI_SBE_MESG)
@@ -1411,20 +1411,20 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated)
if(H5C__serialize_cache(f) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "serialization of the cache failed")
- /* Scan the cache and record data needed to construct the
+ /* Scan the cache and record data needed to construct the
* cache image. In particular, for each entry we must record:
*
* 1) rank in LRU (if entry is in LRU)
*
- * 2) Whether the entry is dirty prior to flush of
+ * 2) Whether the entry is dirty prior to flush of
* cache just prior to close.
*
* 3) Addresses of flush dependency parents (if any).
*
- * 4) Number of flush dependency children (if any).
+ * 4) Number of flush dependency children (if any).
*
- * In passing, also compute the size of the metadata cache
- * image. With the recent modifications of the free space
+ * In passing, also compute the size of the metadata cache
+ * image. With the recent modifications of the free space
* manager code, this size should be correct.
*/
if(H5C__prep_for_file_close__scan_entries(f, cache_ptr) < 0)
@@ -1432,7 +1432,7 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated)
HDassert(HADDR_UNDEF == cache_ptr->image_addr);
#ifdef H5_HAVE_PARALLEL
- /* In the parallel case, overwrite the image_len with the
+ /* In the parallel case, overwrite the image_len with the
* value computed by process 0.
*/
if(cache_ptr->aux_ptr) { /* we have multiple processes */
@@ -1453,17 +1453,17 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated)
else {
if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&p0_image_len, 1, MPI_UNSIGNED, 0, aux_ptr->mpi_comm)))
HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
-
+
aux_ptr->p0_image_len = p0_image_len;
} /* end else */
- /* Allocate space for a cache image of size equal to that
- * computed by the process 0. This may be different from
+ /* Allocate space for a cache image of size equal to that
+ * computed by the process 0. This may be different from
* cache_ptr->image_data_len if mpi_rank != 0. However, since
- * cache image write is suppressed on all processes other than
+ * cache image write is suppressed on all processes other than
* process 0, this doesn't matter.
*
- * Note that we allocate the cache image directly from the file
+ * Note that we allocate the cache image directly from the file
* driver so as to avoid unsettling the free space managers.
*/
if(HADDR_UNDEF == (cache_ptr->image_addr = H5FD_alloc(f->shared->lf, H5FD_MEM_SUPER, f,
@@ -1472,8 +1472,8 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated)
} /* end if */
else
#endif /* H5_HAVE_PARALLEL */
- /* Allocate the cache image block. Note that we allocate this
- * this space directly from the file driver so as to avoid
+ /* Allocate the cache image block. Note that we allocate this
+         * space directly from the file driver so as to avoid
* unsettling the free space managers.
*/
if(HADDR_UNDEF == (cache_ptr->image_addr = H5FD_alloc(f->shared->lf, H5FD_MEM_SUPER, f,
@@ -1497,25 +1497,25 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated)
* image block to the next alignment boundary, and then setting
* the image_data_len to the actual size of the cache_image.
*
- * On the off chance that there is some other way to get a
+ * On the off chance that there is some other way to get a
         * fragment on a cache image allocation, leave the following
* assertion in the code so we will find out.
*/
HDassert((eoa_frag_size == 0) || (f->shared->alignment != 1));
/* Eventually it will be possible for the length of the cache image
- * block on file to be greater than the size of the data it
- * contains. However, for now they must be the same. Set
+ * block on file to be greater than the size of the data it
+ * contains. However, for now they must be the same. Set
* cache_ptr->image_len accordingly.
*/
cache_ptr->image_len = cache_ptr->image_data_len;
- /* update the metadata cache image superblock extension
- * message with the new cache image block base address and
+ /* update the metadata cache image superblock extension
+ * message with the new cache image block base address and
* length.
*
- * to simplify testing, do this only if the
- * H5C_CI__GEN_MDC_IMAGE_BLK bit is set in
+ * to simplify testing, do this only if the
+ * H5C_CI__GEN_MDC_IMAGE_BLK bit is set in
* cache_ptr->image_ctl.flags.
*/
if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDC_IMAGE_BLK)
@@ -1525,18 +1525,18 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated)
/* At this point:
*
* 1) space in the file for the metadata cache image
- * is allocated,
+ * is allocated,
*
- * 2) the metadata cache image superblock extension
- * message exists and (if so configured) contains
+ * 2) the metadata cache image superblock extension
+ * message exists and (if so configured) contains
* the correct data,
*
- * 3) All entries in the cache that will appear in the
+ * 3) All entries in the cache that will appear in the
* cache image are serialized with up to date images.
*
* Since we just updated the cache image message,
* the super block extension message is dirty. However,
- * since the superblock and the superblock extension
+ * since the superblock and the superblock extension
* can't be included in the cache image, this is a non-
* issue.
*
@@ -1544,16 +1544,16 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated)
* the cache are marked as such, and we have a count
* of same.
*
- * 5) Flush dependency heights are calculated for all
+ * 5) Flush dependency heights are calculated for all
* entries that will be included in the cache image.
*
* If there are any entries to be included in the metadata cache
- * image, allocate, populate, and sort the image_entries array.
+ * image, allocate, populate, and sort the image_entries array.
*
- * If the metadata cache image will be empty, delete the
- * metadata cache image superblock extension message, set
+ * If the metadata cache image will be empty, delete the
+ * metadata cache image superblock extension message, set
* cache_ptr->image_ctl.generate_image to FALSE. This will
- * allow the file close to continue normally without the
+ * allow the file close to continue normally without the
* unnecessary generation of the metadata cache image.
*/
if(cache_ptr->num_entries_in_image > 0) {
@@ -1567,9 +1567,9 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated)
else { /* cancel creation of metadata cache image */
HDassert(cache_ptr->image_entries == NULL);
- /* To avoid breaking the control flow tests, only delete
- * the mdci superblock extension message if the
- * H5C_CI__GEN_MDC_IMAGE_BLK flag is set in
+ /* To avoid breaking the control flow tests, only delete
+ * the mdci superblock extension message if the
+ * H5C_CI__GEN_MDC_IMAGE_BLK flag is set in
* cache_ptr->image_ctl.flags.
*/
if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDC_IMAGE_BLK)
@@ -1591,8 +1591,8 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C_set_cache_image_config
*
- * Purpose: If *config_ptr contains valid data, copy it into the
- * image_ctl field of *cache_ptr. Make adjustments for
+ * Purpose: If *config_ptr contains valid data, copy it into the
+ * image_ctl field of *cache_ptr. Make adjustments for
* changes in configuration as required.
*
* If the file is open read only, silently
@@ -1601,9 +1601,9 @@ done:
*
* Note that in addition to being inapplicable in the
* read only case, cache image is also inapplicable if
- * the superblock does not support superblock extension
- * messages. Unfortunately, this information need not
- * be available at this point. Thus we check for this
+ * the superblock does not support superblock extension
+ * messages. Unfortunately, this information need not
+ * be available at this point. Thus we check for this
* later, in H5C_prep_for_file_close() and cancel the
* cache image request if appropriate.
*
@@ -1638,7 +1638,7 @@ H5C_set_cache_image_config(const H5F_t *f, H5C_t *cache_ptr,
HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "invalid cache image configuration")
#ifdef H5_HAVE_PARALLEL
- /* The collective metadata write code is not currently compatible
+ /* The collective metadata write code is not currently compatible
* with cache image. Until this is fixed, suppress cache image silently
* if there is more than one process.
* JRM -- 11/8/16
@@ -1652,15 +1652,15 @@ H5C_set_cache_image_config(const H5F_t *f, H5C_t *cache_ptr,
else {
#endif /* H5_HAVE_PARALLEL */
/* A cache image can only be generated if the file is opened read / write
- * and the superblock supports superblock extension messages.
+ * and the superblock supports superblock extension messages.
*
- * However, the superblock version is not available at this point --
+ * However, the superblock version is not available at this point --
* hence we can only check the former requirement now. Do the latter
- * check just before we construct the image..
+     * check just before we construct the image.
*
* If the file is opened read / write, apply the supplied configuration.
*
- * If it is not, set the image configuration to the default, which has
+ * If it is not, set the image configuration to the default, which has
* the effect of silently disabling the cache image if it was requested.
*/
if(H5F_INTENT(f) & H5F_ACC_RDWR)
@@ -1683,7 +1683,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C_validate_cache_image_config()
*
- * Purpose: Run a sanity check on the provided instance of struct
+ * Purpose: Run a sanity check on the provided instance of struct
* H5AC_cache_image_config_t.
*
* Do nothing and return SUCCEED if no errors are detected,
@@ -1715,8 +1715,8 @@ H5C_validate_cache_image_config(H5C_cache_image_ctl_t * ctl_ptr)
if(ctl_ptr->save_resize_status != FALSE)
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unexpected value in save_resize_status field")
- /* At present, we do not support prefetched entry ageouts. Thus
- * the entry_ageout field must be set to
+ /* At present, we do not support prefetched entry ageouts. Thus
+ * the entry_ageout field must be set to
* H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE.
*/
if(ctl_ptr->entry_ageout != H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE)
@@ -1740,7 +1740,7 @@ done:
 * Purpose:     Compute the size of an entry header in the metadata cache
* image block, and return the value.
*
- * Return: Size of the header section of the metadata cache image
+ * Return:      Size of an entry header in the metadata cache image
* block in bytes.
*
* Programmer: John Mainzer
@@ -1777,7 +1777,7 @@ H5C__cache_image_block_entry_header_size(const H5F_t * f)
* Purpose: Compute the size of the header of the metadata cache
* image block, and return the value.
*
- * Return: Size of the header section of the metadata cache image
+ * Return: Size of the header section of the metadata cache image
* block in bytes.
*
* Programmer: John Mainzer
@@ -1806,9 +1806,9 @@ H5C__cache_image_block_header_size(const H5F_t * f)
/*-------------------------------------------------------------------------
* Function: H5C__decode_cache_image_header()
*
- * Purpose: Decode the metadata cache image buffer header from the
+ * Purpose: Decode the metadata cache image buffer header from the
* supplied buffer and load the data into the supplied instance
- * of H5C_t. Advances the buffer pointer to the first byte
+ * of H5C_t. Advances the buffer pointer to the first byte
 *		after the header image, or leaves it unchanged on failure.
*
* Return: Non-negative on success/Negative on failure
@@ -1853,7 +1853,7 @@ H5C__decode_cache_image_header(const H5F_t *f, H5C_t *cache_ptr,
/* Decode flags */
flags = *p++;
- if(flags & H5C__MDCI_HEADER_HAVE_RESIZE_STATUS)
+ if(flags & H5C__MDCI_HEADER_HAVE_RESIZE_STATUS)
have_resize_status = TRUE;
if(have_resize_status)
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "MDC resize status not yet supported")
@@ -1867,7 +1867,7 @@ H5C__decode_cache_image_header(const H5F_t *f, H5C_t *cache_ptr,
/* Read num entries */
UINT32DECODE(p, cache_ptr->num_entries_in_image);
- if(cache_ptr->num_entries_in_image == 0)
+ if(cache_ptr->num_entries_in_image == 0)
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad metadata cache entry count")
/* Verify expected length of header */
@@ -1888,13 +1888,13 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C__decode_cache_image_entry()
*
- * Purpose: Decode the metadata cache image entry from the supplied
+ * Purpose: Decode the metadata cache image entry from the supplied
* buffer into the supplied instance of H5C_image_entry_t.
* This includes allocating a buffer for the entry image,
- * loading it, and seting ie_ptr->image_ptr to point to
+ *		loading it, and setting ie_ptr->image_ptr to point to
* the buffer.
*
- * Advances the buffer pointer to the first byte
+ * Advances the buffer pointer to the first byte
 *		after the entry, or leaves it unchanged on failure.
*
* Return: Non-negative on success/Negative on failure
@@ -1998,8 +1998,8 @@ H5C__decode_cache_image_entry(const H5F_t *f, const H5C_t *cache_ptr,
/* Verify expected length of entry image */
if((size_t)(p - *buf) != H5C__cache_image_block_entry_header_size(f))
HGOTO_ERROR(H5E_CACHE, H5E_BADSIZE, FAIL, "Bad entry image len")
-
- /* If parent count greater than zero, allocate array for parent
+
+    /* If the parent count is greater than zero, allocate an array for parent
* addresses, and decode addresses into the array.
*/
if(fd_parent_count > 0) {
@@ -2053,9 +2053,9 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C__destroy_pf_entry_child_flush_deps()
*
- * Purpose: Destroy all flush dependencies in this the supplied
+ * Purpose:     Destroy all flush dependencies in which the supplied
 *		prefetched entry is the parent.  Note that the children
- * in these flush dependencies must be prefetched entries as
+ * in these flush dependencies must be prefetched entries as
* well.
*
* As this action is part of the process of transferring all
@@ -2063,8 +2063,8 @@ done:
* prefetched entry, ensure that the data necessary to complete
* the transfer is retained.
*
- * Note: The current implementation of this function is
- * quite inefficient -- mostly due to the current
+ * Note: The current implementation of this function is
+ * quite inefficient -- mostly due to the current
* implementation of flush dependencies. This should
* be fixed at some point.
*
@@ -2076,7 +2076,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr,
+H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr,
H5C_cache_entry_t *pf_entry_ptr, H5C_cache_entry_t **fd_children)
{
H5C_cache_entry_t * entry_ptr;
@@ -2103,8 +2103,8 @@ H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr,
while(entry_ptr != NULL) {
HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- /* Here we look at entry_ptr->flush_dep_nparents and not
- * entry_ptr->fd_parent_count as it is possible that some
+ /* Here we look at entry_ptr->flush_dep_nparents and not
+ * entry_ptr->fd_parent_count as it is possible that some
* or all of the prefetched flush dependency child relationships
* have already been destroyed.
*/
@@ -2145,9 +2145,9 @@ H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr,
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "can't destroy pf entry child flush dependency")
#ifndef NDEBUG
- /* Sanity check -- verify that the address of the parent
+ /* Sanity check -- verify that the address of the parent
* appears in entry_ptr->fd_parent_addrs. Must do a search,
- * as with flush dependency creates and destroys,
+ * as with flush dependency creates and destroys,
* entry_ptr->fd_parent_addrs and entry_ptr->flush_dep_parent
* can list parents in different order.
*/
@@ -2181,8 +2181,8 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C__encode_cache_image_header()
*
- * Purpose: Encode the metadata cache image buffer header in the
- * supplied buffer. Updates buffer pointer to the first byte
+ * Purpose: Encode the metadata cache image buffer header in the
+ * supplied buffer. Updates buffer pointer to the first byte
 *		after the header image in the buffer, or leaves it unchanged on failure.
*
* Return: Non-negative on success/Negative on failure
@@ -2259,8 +2259,8 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C__encode_cache_image_entry()
*
- * Purpose: Encode the metadata cache image buffer header in the
- * supplied buffer. Updates buffer pointer to the first byte
+ * Purpose:     Encode a metadata cache image entry in the
+ * supplied buffer. Updates buffer pointer to the first byte
 *		after the entry in the buffer, or leaves it unchanged on failure.
*
* Return: Non-negative on success/Negative on failure
@@ -2271,7 +2271,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, uint8_t **buf,
+H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, uint8_t **buf,
unsigned entry_num)
{
H5C_image_entry_t * ie_ptr; /* Pointer to entry to encode */
@@ -2306,13 +2306,13 @@ H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, uint8_t **buf,
*p++ = (uint8_t)(ie_ptr->type_id);
/* Compose and encode flags */
- if(ie_ptr->is_dirty)
+ if(ie_ptr->is_dirty)
flags |= H5C__MDCI_ENTRY_DIRTY_FLAG;
- if(ie_ptr->lru_rank > 0)
+ if(ie_ptr->lru_rank > 0)
flags |= H5C__MDCI_ENTRY_IN_LRU_FLAG;
if(ie_ptr->fd_child_count > 0)
flags |= H5C__MDCI_ENTRY_IS_FD_PARENT_FLAG;
- if(ie_ptr->fd_parent_count > 0)
+ if(ie_ptr->fd_parent_count > 0)
flags |= H5C__MDCI_ENTRY_IS_FD_CHILD_FLAG;
*p++ = flags;
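/* Sketch of the flag-composition idiom above: each entry property sets one
 * bit in a single flags byte.  The bit values here are illustrative, not
 * the actual H5C__MDCI_* constants.
 */
#include <stdint.h>

#define SK_DIRTY      0x01u
#define SK_IN_LRU     0x02u
#define SK_FD_PARENT  0x04u
#define SK_FD_CHILD   0x08u

static uint8_t
sketch_compose_flags(int is_dirty, int lru_rank,
                     unsigned n_children, unsigned n_parents)
{
    uint8_t flags = 0;

    if (is_dirty)       flags |= SK_DIRTY;
    if (lru_rank > 0)   flags |= SK_IN_LRU;
    if (n_children > 0) flags |= SK_FD_PARENT;
    if (n_parents > 0)  flags |= SK_FD_CHILD;

    return flags;
}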
@@ -2377,51 +2377,51 @@ done:
* flush dependency children. (Recall that the flush dependency
* height of an entry in a flush dependency relationship is the
* length of the longest path from the entry to a leaf entry --
- * that is an entry with flush dependency parents, but no
- * flush dependency children. With the introduction of the
+ * that is an entry with flush dependency parents, but no
+ * flush dependency children. With the introduction of the
* possibility of multiple flush dependency parents, we have
- * a flush partial dependency latice, not a flush dependency
- * tree. But since the partial latice is acyclic, the concept
+ *		a partial flush dependency lattice, not a flush dependency
+ *		tree.  But since the partial lattice is acyclic, the concept
* of flush dependency height still makes sense.
*
- * The purpose of this function is to compute the flush
+ * The purpose of this function is to compute the flush
* dependency height of all entries that appear in the cache
- * image.
+ * image.
*
- * At present, entries are included or excluded from the
+ * At present, entries are included or excluded from the
* cache image depending upon the ring in which they reside.
* Thus there is no chance that one side of a flush dependency
* will be in the cache image, and the other side not.
*
* However, once we start placing a limit on the size of the
* cache image, or start excluding prefetched entries from
- * the cache image if they haven't been accessed in some
- * number of file close / open cycles, this will no longer
- * be the case.
+ * the cache image if they haven't been accessed in some
+ * number of file close / open cycles, this will no longer
+ * be the case.
*
* In particular, if a flush dependency child is dirty, and
* one of its flush dependency parents is dirty and not in
* the cache image, then the flush dependency child cannot
* be in the cache image without violating flush ordering.
*
- * Observe that a clean flush dependency child can be either
- * in or out of the cache image without effect on flush
+ * Observe that a clean flush dependency child can be either
+ * in or out of the cache image without effect on flush
* dependencies.
*
- * Similarly, a flush dependency parent can always be part
- * of a cache image, regardless of whether it is clean or
+ * Similarly, a flush dependency parent can always be part
+ * of a cache image, regardless of whether it is clean or
* dirty -- but remember that a flush dependency parent can
* also be a flush dependency child.
- *
+ *
* Finally, note that for purposes of the cache image, flush
- * dependency height ends when a flush dependecy relation
+ *		dependency height ends when a flush dependency relation
 *		passes out of the cache image.
*
- * On exit, the flush dependency height of each entry in the
+ * On exit, the flush dependency height of each entry in the
* cache image should be calculated and stored in the cache
* entry. Entries will be removed from the cache image if
* necessary to maintain flush ordering.
- *
+ *
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
@@ -2447,10 +2447,10 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr)
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- /* Remove from the cache image all dirty entries that are
+ /* Remove from the cache image all dirty entries that are
* flush dependency children of dirty entries that are not in the
- * cache image. Must do this, as if we fail to do so, the parent
- * will be written to file before the child. Since it is possible
+ * cache image. Must do this, as if we fail to do so, the parent
+ * will be written to file before the child. Since it is possible
* that the child will have dirty children of its own, this may take
* multiple passes through the index list.
*/
@@ -2484,17 +2484,17 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr)
entry_ptr = entry_ptr->il_next;
} /* while ( entry_ptr != NULL ) */
- } /* while ( ! done ) */
+ } /* while ( ! done ) */
/* at present, entries are included in the cache image if they reside
- * in a specified set of rings. Thus it should be impossible for
- * entries_removed_from_image to be positive. Assert that this is
- * so. Note that this will change when we start aging entries out
+ * in a specified set of rings. Thus it should be impossible for
+ * entries_removed_from_image to be positive. Assert that this is
+ * so. Note that this will change when we start aging entries out
* of the cache image.
*/
HDassert(entries_removed_from_image == 0);
- /* Next, remove from entries in the cache image, references to
+    /* Next, remove from entries in the cache image any references to
* flush dependency parents or children that are not in the cache image.
*/
entry_ptr = cache_ptr->il_head;
@@ -2575,14 +2575,14 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr)
entry_ptr = entry_ptr->il_next;
} /* while (entry_ptr != NULL) */
- /* At present, no extenal parent or child flush dependency links
+    /* At present, no external parent or child flush dependency links
* should exist -- hence the following assertions. This will change
* if we support ageout of entries in the cache image.
*/
HDassert(external_child_fd_refs_removed == 0);
HDassert(external_parent_fd_refs_removed == 0);
- /* At this point we should have removed all flush dependencies that
+ /* At this point we should have removed all flush dependencies that
* cross cache image boundaries. Now compute the flush dependency
* heights for all entries in the image.
*
@@ -2600,7 +2600,7 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr)
parent_ptr = entry_ptr->flush_dep_parent[u];
HDassert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- if(parent_ptr->include_in_image && parent_ptr->image_fd_height <= 0)
+ if(parent_ptr->include_in_image && parent_ptr->image_fd_height <= 0)
H5C__prep_for_file_close__compute_fd_heights_real(parent_ptr, 1);
} /* end for */
} /* end if */
@@ -2623,37 +2623,37 @@ done:
*
* The basic observation behind this function is as follows:
*
- * Suppose you have an entry E with a flush dependency
- * height of X. Then the parents of E must all have
+ * Suppose you have an entry E with a flush dependency
+ * height of X. Then the parents of E must all have
 *		flush dependency height X + 1 or greater.
*
* Use this observation to compute flush dependency height
* of all entries in the cache image via the following
* recursive algorithm:
*
- * 1) On entry, set the flush dependency height of the
+ * 1) On entry, set the flush dependency height of the
* supplied cache entry to the supplied value.
*
- * 2) Examine all the flush dependency parents of the
- * supplied entry.
+ * 2) Examine all the flush dependency parents of the
+ * supplied entry.
*
- * If the parent is in the cache image, and has flush
+ * If the parent is in the cache image, and has flush
* dependency height less than or equal to the flush
- * dependency height of the current entry, call the
+ * dependency height of the current entry, call the
* recursive routine on the parent with flush dependency
- * height equal to the flush dependency height of the
+ * height equal to the flush dependency height of the
* child plus 1.
*
* Otherwise do nothing.
*
* Observe that if the flush dependency height of all entries
- * in the image is initialized to zero, and if this recursive
- * function is called with flush dependency height 0 on all
- * entries in the cache image with FD parents in the image,
- * but without FD children in the image, the correct flush
- * dependency height should be set for all entries in the
+ * in the image is initialized to zero, and if this recursive
+ * function is called with flush dependency height 0 on all
+ * entries in the cache image with FD parents in the image,
+ * but without FD children in the image, the correct flush
+ * dependency height should be set for all entries in the
* cache image.
- *
+ *
* Return: void
*
* Programmer: John Mainzer
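/* Self-contained sketch of the recursion described above, on toy types
 * (sk_node is hypothetical, not the real cache entry): record the entry's
 * height, then push height + 1 up to every in-image parent whose recorded
 * height is not already larger.
 */
struct sk_node {
    int              in_image;   /* entry appears in the cache image */
    unsigned         fd_height;  /* computed flush dependency height */
    unsigned         n_parents;
    struct sk_node **parents;
};

static void
sk_compute_fd_height(struct sk_node *entry, unsigned height)
{
    unsigned u;

    entry->fd_height = height;
    for (u = 0; u < entry->n_parents; u++) {
        struct sk_node *parent = entry->parents[u];

        if (parent->in_image && parent->fd_height <= height)
            sk_compute_fd_height(parent, height + 1);
    }
}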
@@ -2698,9 +2698,9 @@ H5C__prep_for_file_close__compute_fd_heights_real(H5C_cache_entry_t *entry_ptr,
* Function: H5C__prep_for_file_close__setup_image_entries_array
*
* Purpose: Allocate space for the image_entries array, and load
- * each instance of H5C_image_entry_t in the array with
+ * each instance of H5C_image_entry_t in the array with
* the data necessary to construct the metadata cache image.
- *
+ *
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
@@ -2758,14 +2758,14 @@ H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr)
image_entries[u].ring = entry_ptr->ring;
/* When a prefetched entry is included in the image, store
- * its underlying type id in the image entry, not
+ * its underlying type id in the image entry, not
* H5AC_PREFETCHED_ENTRY_ID. In passing, also increment
* the age (up to H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX).
*/
if(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID) {
image_entries[u].type_id = entry_ptr->prefetch_type_id;
image_entries[u].age = entry_ptr->age + 1;
-
+
if(image_entries[u].age > H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX)
image_entries[u].age = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX;
} /* end if */
@@ -2780,13 +2780,13 @@ H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr)
image_entries[u].fd_parent_count = entry_ptr->fd_parent_count;
image_entries[u].fd_parent_addrs = entry_ptr->fd_parent_addrs;
image_entries[u].fd_child_count = entry_ptr->fd_child_count;
- image_entries[u].fd_dirty_child_count =
+ image_entries[u].fd_dirty_child_count =
entry_ptr->fd_dirty_child_count;
image_entries[u].image_ptr = entry_ptr->image_ptr;
- /* Null out entry_ptr->fd_parent_addrs and set
+ /* Null out entry_ptr->fd_parent_addrs and set
* entry_ptr->fd_parent_count to zero so that ownership of the
- * flush dependency parents address array is transferred to the
+         * flush dependency parent address array is transferred to the
* image entry.
*/
entry_ptr->fd_parent_count = 0;
@@ -2819,8 +2819,8 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C__prep_for_file_close__scan_entries
*
- * Purpose: Scan all entries in the metadata cache, and store all
- * entry specific data required for construction of the
+ * Purpose: Scan all entries in the metadata cache, and store all
+ * entry specific data required for construction of the
* metadata cache image block and likely to be discarded
* or modified during the cache flush on file close.
*
@@ -2838,7 +2838,7 @@ done:
*
* Finally, compute the size of the metadata cache image
* block.
- *
+ *
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
@@ -2891,7 +2891,7 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr)
HDassert(entry_ptr->image_ptr);
/* Initially, we mark all entries in the rings included
- * in the cache image as being included in the in the
+         * in the cache image as being included in the
* image. Depending on circumstances, we may exclude some
* of these entries later.
*/
@@ -2907,7 +2907,7 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr)
entry_ptr->image_fd_height = 0; /* will compute this later */
/* Initially, include all flush dependency parents in the
- * the list of flush dependencies to be stored in the
+         * list of flush dependencies to be stored in the
* image. We may remove some or all of these later.
*/
if(entry_ptr->flush_dep_nparents > 0) {
@@ -2915,7 +2915,7 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr)
* as needed.
*/
if(entry_ptr->flush_dep_nparents == entry_ptr->fd_parent_count ) {
- /* parent addresses array should already be allocated
+ /* parent addresses array should already be allocated
* and of the correct size.
*/
HDassert(entry_ptr->fd_parent_addrs);
@@ -2947,8 +2947,8 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr)
HDassert(entry_ptr->fd_parent_addrs == NULL);
        /* Initially, all flush dependency children are included in
- * the count of flush dependency child relationships to be
- * represented in the cache image. Some or all of these
+ * the count of flush dependency child relationships to be
+ * represented in the cache image. Some or all of these
* may be dropped from the image later.
*/
if(entry_ptr->flush_dep_nchildren > 0) {
@@ -2970,23 +2970,23 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr)
/* Now compute the flush dependency heights of all flush dependency
* relationships to be represented in the image.
*
- * If all entries in the target rings are included in the
- * image, the flush dependency heights are simply the heights
+ * If all entries in the target rings are included in the
+ * image, the flush dependency heights are simply the heights
* of all flush dependencies in the target rings.
*
- * However, if we restrict appearance in the cache image either
- * by number of entries in the image, restrictions on the number
- * of times a prefetched entry can appear in an image, or image
+ * However, if we restrict appearance in the cache image either
+ * by number of entries in the image, restrictions on the number
+ * of times a prefetched entry can appear in an image, or image
* size, it is possible that flush dependency parents or children
* of entries that are in the image may not be included in the
- * the image. In this case, we must prune all flush dependency
- * relationships that cross the image boundary, and all exclude
- * from the image all dirty flush dependency children that have
- * a dirty flush dependency parent that is not in the image.
+     * image.  In this case, we must prune all flush dependency
+     * relationships that cross the image boundary, and exclude
+     * from the image all dirty flush dependency children that have
+     * a dirty flush dependency parent that is not in the image.
* This is necessary to preserve the required flush ordering.
- *
- * These details are tended to by the following call to
- * H5C__prep_for_file_close__compute_fd_heights(). Because the
+ *
+ * These details are tended to by the following call to
+ * H5C__prep_for_file_close__compute_fd_heights(). Because the
* exact contents of the image cannot be known until after this
* call, computation of the image size is delayed.
*/
@@ -2994,7 +2994,7 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "computation of flush dependency heights failed?!?")
/* At this point, all entries that will appear in the cache
- * image should be marked correctly. Compute the size of the
+ * image should be marked correctly. Compute the size of the
* cache image.
*/
entries_visited = 0;
@@ -3031,11 +3031,11 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr)
/* Now scan the LRU list to set the lru_rank fields of all entries
* on the LRU.
*
- * Note that we start with rank 1, and increment by 1 with each
- * entry on the LRU.
+ * Note that we start with rank 1, and increment by 1 with each
+ * entry on the LRU.
*
     * Note that manually pinned entries will have lru_rank -1,
- * and no flush dependency. Putting these entries at the head of
+ * and no flush dependency. Putting these entries at the head of
* the reconstructed LRU should be appropriate.
*/
entry_ptr = cache_ptr->LRU_head_ptr;
@@ -3045,11 +3045,11 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr)
/* to avoid confusion, don't set lru_rank on epoch markers.
* Note that we still increment the lru_rank, so that the holes
- * in the sequence of entries on the LRU will indicate the
- * locations of epoch markers (if any) when we reconstruct
+ * in the sequence of entries on the LRU will indicate the
+ * locations of epoch markers (if any) when we reconstruct
* the LRU.
*
- * Do not set lru_rank or increment lru_rank for entries
+ * Do not set lru_rank or increment lru_rank for entries
* that will not be included in the cache image.
*/
if(entry_ptr->type->id == H5AC_EPOCH_MARKER_ID)
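/* Sketch of the rank-assignment rule described above, on a toy LRU list
 * (sk_lru_node and its fields are hypothetical): epoch markers advance the
 * counter without taking a rank, leaving detectable holes; entries that are
 * excluded from the image neither take nor advance a rank.
 */
struct sk_lru_node {
    int                 is_epoch_marker;
    int                 include_in_image;
    int                 lru_rank;
    struct sk_lru_node *next;
};

static void
sk_assign_lru_ranks(struct sk_lru_node *head)
{
    int rank = 1;
    struct sk_lru_node *node;

    for (node = head; node != NULL; node = node->next) {
        if (node->is_epoch_marker)
            rank++;                     /* hole marks the marker's slot */
        else if (node->include_in_image)
            node->lru_rank = rank++;
    }
}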
@@ -3076,9 +3076,9 @@ done:
* Function: H5C__reconstruct_cache_contents()
*
* Purpose: Scan the image buffer, and create a prefetched
- * cache entry for every entry in the buffer. Insert the
- * prefetched entries in the index and the LRU, and
- * reconstruct any flush dependencies. Order the entries
+ * cache entry for every entry in the buffer. Insert the
+ * prefetched entries in the index and the LRU, and
+ * reconstruct any flush dependencies. Order the entries
* in the LRU as indicated by the stored lru_ranks.
*
* Return: SUCCEED on success, and FAIL on failure.
@@ -3127,11 +3127,11 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr)
if(NULL == (pf_entry_ptr = H5C__reconstruct_cache_entry(f, cache_ptr, &p)))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "reconstruction of cache entry failed")
- /* Note that we make no checks on available cache space before
+ /* Note that we make no checks on available cache space before
* inserting the reconstructed entry into the metadata cache.
*
* This is OK since the cache must be almost empty at the beginning
- * of the process, and since we check cache size at the end of the
+ * of the process, and since we check cache size at the end of the
* reconstruction process.
*/
@@ -3147,7 +3147,7 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr)
H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, pf_entry_ptr->is_dirty)
- /* If the prefetched entry is the child in one or more flush
+ /* If the prefetched entry is the child in one or more flush
* dependency relationships, recreate those flush dependencies.
*/
for(v = 0; v < pf_entry_ptr->fd_parent_count; v++) {
@@ -3171,7 +3171,7 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr)
*/
H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, parent_ptr, FAIL)
parent_ptr->is_protected = TRUE;
-
+
/* Setup the flush dependency */
if(H5C_create_flush_dependency(parent_ptr, pf_entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Can't restore flush dependency")
@@ -3211,7 +3211,7 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr)
pf_entry_ptr = pf_entry_ptr->il_next;
} /* end while */
- /* Scan the LRU, and verify the expected ordering of the
+ /* Scan the LRU, and verify the expected ordering of the
* prefetched entries.
*/
{
@@ -3233,7 +3233,7 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr)
HDassert((entry_ptr->lru_rank == -1) ||
(entry_ptr->lru_rank > i));
- if ( ( entry_ptr->lru_rank > 1 ) &&
+ if ( ( entry_ptr->lru_rank > 1 ) &&
( entry_ptr->lru_rank > i + 1 ) )
lru_rank_holes += entry_ptr->lru_rank - (i + 1);
@@ -3245,9 +3245,9 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr)
entry_ptr = entry_ptr->next;
} /* end while */
- /* Holes in the sequences of LRU ranks can appear due to epoch
+    /* Holes in the sequence of LRU ranks can appear due to epoch
* markers. They are left in to allow re-insertion of the
- * epoch markers on reconstruction of the cache -- thus
+ * epoch markers on reconstruction of the cache -- thus
* the following sanity check will have to be revised when
* we add code to store and restore adaptive resize status.
*/
@@ -3255,7 +3255,7 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr)
} /* end block */
#endif /* NDEBUG */
- /* Check to see if the cache is oversize, and evict entries as
+ /* Check to see if the cache is oversize, and evict entries as
* necessary to remain within limits.
*/
if(cache_ptr->index_size >= cache_ptr->max_cache_size) {
@@ -3289,7 +3289,7 @@ done:
* Return a pointer to the newly allocated cache entry,
* or NULL on failure.
*
- * Return: Pointer to the new instance of H5C_cache_entry on success,
+ * Return: Pointer to the new instance of H5C_cache_entry on success,
* or NULL on failure.
*
* Programmer: John Mainzer
@@ -3347,7 +3347,7 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr,
is_fd_child = TRUE;
#endif /* NDEBUG */ /* only used in assertions */
- /* Force dirty entries to clean if the file read only -- must do
+    /* Force dirty entries to clean if the file is read only -- must do
* this as otherwise the cache will attempt to write them on file
* close. Since the file is R/O, the metadata cache image superblock
* extension message and the cache image block will not be removed.
@@ -3355,7 +3355,7 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr,
*
* However, if the dirty entry (marked clean for purposes of the R/O
* file open) is evicted and then referred to, the cache will read
- * either invalid or obsolete data from the file. Handle this by
+ * either invalid or obsolete data from the file. Handle this by
* setting the prefetched_dirty field, and hiding such entries from
* the eviction candidate selection algorithm.
*/
@@ -3375,7 +3375,7 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr,
/* Decode dirty dependency child count */
UINT16DECODE(p, pf_entry_ptr->fd_dirty_child_count);
- if(!file_is_rw)
+ if(!file_is_rw)
pf_entry_ptr->fd_dirty_child_count = 0;
if(pf_entry_ptr->fd_dirty_child_count > pf_entry_ptr->fd_child_count)
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid dirty flush dependency child count")
@@ -3401,8 +3401,8 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr,
/* Verify expected length of entry image */
if((size_t)(p - *buf) != H5C__cache_image_block_entry_header_size(f))
HGOTO_ERROR(H5E_CACHE, H5E_BADSIZE, NULL, "Bad entry image len")
-
- /* If parent count greater than zero, allocate array for parent
+
+    /* If the parent count is greater than zero, allocate an array for parent
* addresses, and decode addresses into the array.
*/
if(pf_entry_ptr->fd_parent_count > 0) {
@@ -3457,7 +3457,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C__write_cache_image_superblock_msg
*
- * Purpose: Write the cache image superblock extension message,
+ * Purpose: Write the cache image superblock extension message,
 *		creating it if specified.
*
* In general, the size and location of the cache image block
@@ -3560,7 +3560,7 @@ H5C__write_cache_image(H5F_t *f, const H5C_t *cache_ptr)
} /* end if */
} /* end block */
#endif /* H5_HAVE_PARALLEL */
-
+
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__write_cache_image() */
diff --git a/src/H5Clog.c b/src/H5Clog.c
index cf9b7e8..794d6bb 100644
--- a/src/H5Clog.c
+++ b/src/H5Clog.c
@@ -313,7 +313,7 @@ H5C_get_logging_status(const H5C_t *cache, /*OUT*/ hbool_t *is_enabled,
*-------------------------------------------------------------------------
*/
herr_t
-H5C_log_write_create_cache_msg(H5C_t *cache, herr_t fxn_ret_value)
+H5C_log_write_create_cache_msg(H5C_t *cache, herr_t fxn_ret_value)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -348,7 +348,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5C_log_write_destroy_cache_msg(H5C_t *cache)
+H5C_log_write_destroy_cache_msg(H5C_t *cache)
{
herr_t ret_value = SUCCEED; /* Return value */
diff --git a/src/H5Clog_json.c b/src/H5Clog_json.c
index dd9e9b2..051a81d 100644
--- a/src/H5Clog_json.c
+++ b/src/H5Clog_json.c
@@ -143,7 +143,7 @@ static H5C_log_class_t H5C_json_log_class_g = {
/*-------------------------------------------------------------------------
* Function: H5C__json_write_log_message
*
- * Purpose: Write a message to the log file and flush the file.
+ * Purpose: Write a message to the log file and flush the file.
* The message string is neither modified nor freed.
*
* Return: SUCCEED/FAIL
@@ -171,7 +171,7 @@ H5C__json_write_log_message(H5C_log_json_udata_t *json_udata)
if((int)n_chars != HDfprintf(json_udata->outfile, json_udata->message))
HGOTO_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "error writing log message")
HDmemset((void *)(json_udata->message), 0, (size_t)(n_chars * sizeof(char)));
-
+
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__json_write_log_message() */
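Every H5C__json_write_*_log_msg() callback below follows the same two-step pattern: format a record into the pre-allocated message buffer, then hand it to this helper. Schematically (field names elided):

    /* 1) format one JSON record into the shared buffer */
    HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
            "{\"timestamp\":%lld,\"action\":\"...\",\"returned\":%d},\n",
            (long long)HDtime(NULL), (int)fxn_ret_value);

    /* 2) write it out and clear the buffer for the next record */
    if(H5C__json_write_log_message(json_udata) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message")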
@@ -222,7 +222,7 @@ H5C_log_json_set_up(H5C_log_info_t *log_info, const char log_location[], int mpi
if(NULL == (log_info->udata = H5MM_calloc(sizeof(H5C_log_json_udata_t))))
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed")
json_udata = (H5C_log_json_udata_t *)(log_info->udata);
-
+
/* Allocate memory for the message buffer */
if(NULL == (json_udata->message = (char *)H5MM_calloc(H5C_MAX_JSON_LOG_MSG_SIZE * sizeof(char))))
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed")
@@ -341,7 +341,7 @@ H5C__json_write_start_log_msg(void *udata)
HDassert(json_udata->message);
/* Create the log message string (opens the JSON array) */
- HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
+ HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
"\
{\n\
\"HDF5 metadata cache log messages\" : [\n\
@@ -386,7 +386,7 @@ H5C__json_write_stop_log_msg(void *udata)
HDassert(json_udata->message);
/* Create the log message string (closes the JSON array) */
- HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
+ HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
"\
{\
\"timestamp\":%lld,\
@@ -399,7 +399,7 @@ H5C__json_write_stop_log_msg(void *udata)
/* Write the log message to the file */
if(H5C__json_write_log_message(json_udata) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message")
-
+
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__json_write_stop_log_msg() */
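The start and stop messages bracket the per-call records, so a finished log file has roughly this shape (illustrative; record fields elided, closing brackets emitted by the stop message):

    {
    "HDF5 metadata cache log messages" : [
    {"timestamp":...,"action":"...","returned":0},
    ...
    ]
    }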
@@ -418,7 +418,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5C__json_write_create_cache_log_msg(void *udata, herr_t fxn_ret_value)
+H5C__json_write_create_cache_log_msg(void *udata, herr_t fxn_ret_value)
{
H5C_log_json_udata_t *json_udata = (H5C_log_json_udata_t *)(udata);
herr_t ret_value = SUCCEED; /* Return value */
@@ -430,7 +430,7 @@ H5C__json_write_create_cache_log_msg(void *udata, herr_t fxn_ret_value)
HDassert(json_udata->message);
/* Create the log message string */
- HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
+ HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
"\
{\
\"timestamp\":%lld,\
@@ -474,7 +474,7 @@ H5C__json_write_destroy_cache_log_msg(void *udata)
HDassert(json_udata->message);
/* Create the log message string */
- HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
+ HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
"\
{\
\"timestamp\":%lld,\
@@ -517,7 +517,7 @@ H5C__json_write_evict_cache_log_msg(void *udata, herr_t fxn_ret_value)
HDassert(json_udata->message);
/* Create the log message string */
- HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
+ HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
"\
{\
\"timestamp\":%lld,\
@@ -562,7 +562,7 @@ H5C__json_write_expunge_entry_log_msg(void *udata, haddr_t address,
HDassert(json_udata->message);
/* Create the log message string */
- HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
+ HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
"\
{\
\"timestamp\":%lld,\
@@ -609,7 +609,7 @@ H5C__json_write_flush_cache_log_msg(void *udata, herr_t fxn_ret_value)
HDassert(json_udata->message);
/* Create the log message string */
- HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
+ HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
"\
{\
\"timestamp\":%lld,\
@@ -654,7 +654,7 @@ H5C__json_write_insert_entry_log_msg(void *udata, haddr_t address,
HDassert(json_udata->message);
/* Create the log message string */
- HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
+ HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
"\
{\
\"timestamp\":%lld,\
@@ -705,7 +705,7 @@ H5C__json_write_mark_entry_dirty_log_msg(void *udata, const H5C_cache_entry_t *e
HDassert(entry);
/* Create the log message string */
- HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
+ HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
"\
{\
\"timestamp\":%lld,\
@@ -752,7 +752,7 @@ H5C__json_write_mark_entry_clean_log_msg(void *udata, const H5C_cache_entry_t *e
HDassert(entry);
/* Create the log message string */
- HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
+ HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
"\
{\
\"timestamp\":%lld,\
@@ -799,7 +799,7 @@ H5C__json_write_mark_unserialized_entry_log_msg(void *udata,
HDassert(entry);
/* Create the log message string */
- HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
+ HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
"\
{\
\"timestamp\":%lld,\
@@ -846,7 +846,7 @@ H5C__json_write_mark_serialized_entry_log_msg(void *udata, const H5C_cache_entry
HDassert(entry);
/* Create the log message string */
- HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
+ HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
"\
{\
\"timestamp\":%lld,\
@@ -892,7 +892,7 @@ H5C__json_write_move_entry_log_msg(void *udata, haddr_t old_addr, haddr_t new_ad
HDassert(json_udata->message);
/* Create the log message string */
- HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
+ HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
"\
{\
\"timestamp\":%lld,\
@@ -903,7 +903,7 @@ H5C__json_write_move_entry_log_msg(void *udata, haddr_t old_addr, haddr_t new_ad
\"returned\":%d\
},\n\
"
- , (long long)HDtime(NULL), (unsigned long)old_addr,
+ , (long long)HDtime(NULL), (unsigned long)old_addr,
(unsigned long)new_addr, type_id, (int)fxn_ret_value);
/* Write the log message to the file */
@@ -942,7 +942,7 @@ H5C__json_write_pin_entry_log_msg(void *udata, const H5C_cache_entry_t *entry,
HDassert(entry);
/* Create the log message string */
- HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
+ HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
"\
{\
\"timestamp\":%lld,\
@@ -951,7 +951,7 @@ H5C__json_write_pin_entry_log_msg(void *udata, const H5C_cache_entry_t *entry,
\"returned\":%d\
},\n\
"
- , (long long)HDtime(NULL), (unsigned long)entry->addr,
+ , (long long)HDtime(NULL), (unsigned long)entry->addr,
(int)fxn_ret_value);
/* Write the log message to the file */
@@ -992,7 +992,7 @@ H5C__json_write_create_fd_log_msg(void *udata, const H5C_cache_entry_t *parent,
HDassert(child);
/* Create the log message string */
- HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
+ HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
"\
{\
\"timestamp\":%lld,\
@@ -1002,7 +1002,7 @@ H5C__json_write_create_fd_log_msg(void *udata, const H5C_cache_entry_t *parent,
\"returned\":%d\
},\n\
"
- , (long long)HDtime(NULL), (unsigned long)parent->addr,
+ , (long long)HDtime(NULL), (unsigned long)parent->addr,
(unsigned long)child->addr, (int)fxn_ret_value);
/* Write the log message to the file */
@@ -1043,11 +1043,11 @@ H5C__json_write_protect_entry_log_msg(void *udata, const H5C_cache_entry_t *entr
if(H5C__READ_ONLY_FLAG == flags)
HDstrcpy(rw_s, "READ");
- else
+ else
HDstrcpy(rw_s, "WRITE");
/* Create the log message string */
- HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
+ HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
"\
{\
\"timestamp\":%lld,\
@@ -1059,7 +1059,7 @@ H5C__json_write_protect_entry_log_msg(void *udata, const H5C_cache_entry_t *entr
\"returned\":%d\
},\n\
"
- , (long long)HDtime(NULL), (unsigned long)entry->addr,
+ , (long long)HDtime(NULL), (unsigned long)entry->addr,
type_id, rw_s, (int)entry->size, (int)fxn_ret_value);
/* Write the log message to the file */
@@ -1098,7 +1098,7 @@ H5C__json_write_resize_entry_log_msg(void *udata, const H5C_cache_entry_t *entry
HDassert(entry);
/* Create the log message string */
- HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
+ HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
"\
{\
\"timestamp\":%lld,\
@@ -1108,7 +1108,7 @@ H5C__json_write_resize_entry_log_msg(void *udata, const H5C_cache_entry_t *entry
\"returned\":%d\
},\n\
"
- , (long long)HDtime(NULL), (unsigned long)entry->addr,
+ , (long long)HDtime(NULL), (unsigned long)entry->addr,
(int)new_size, (int)fxn_ret_value);
/* Write the log message to the file */
@@ -1147,7 +1147,7 @@ H5C__json_write_unpin_entry_log_msg(void *udata, const H5C_cache_entry_t *entry,
HDassert(entry);
/* Create the log message string */
- HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
+ HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
"\
{\
\"timestamp\":%lld,\
@@ -1156,7 +1156,7 @@ H5C__json_write_unpin_entry_log_msg(void *udata, const H5C_cache_entry_t *entry,
\"returned\":%d\
},\n\
"
- , (long long)HDtime(NULL), (unsigned long)entry->addr,
+ , (long long)HDtime(NULL), (unsigned long)entry->addr,
(int)fxn_ret_value);
/* Write the log message to the file */
@@ -1197,7 +1197,7 @@ H5C__json_write_destroy_fd_log_msg(void *udata, const H5C_cache_entry_t *parent,
HDassert(child);
/* Create the log message string */
- HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
+ HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
"\
{\
\"timestamp\":%lld,\
@@ -1207,7 +1207,7 @@ H5C__json_write_destroy_fd_log_msg(void *udata, const H5C_cache_entry_t *parent,
\"returned\":%d\
},\n\
"
- , (long long)HDtime(NULL), (unsigned long)parent->addr,
+ , (long long)HDtime(NULL), (unsigned long)parent->addr,
(unsigned long)child->addr, (int)fxn_ret_value);
/* Write the log message to the file */
@@ -1245,7 +1245,7 @@ H5C__json_write_unprotect_entry_log_msg(void *udata, haddr_t address,
HDassert(json_udata->message);
/* Create the log message string */
- HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
+ HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
"\
{\
\"timestamp\":%lld,\
@@ -1256,7 +1256,7 @@ H5C__json_write_unprotect_entry_log_msg(void *udata, haddr_t address,
\"returned\":%d\
},\n\
"
- , (long long)HDtime(NULL), (unsigned long)address,
+ , (long long)HDtime(NULL), (unsigned long)address,
type_id, flags, (int)fxn_ret_value);
/* Write the log message to the file */
@@ -1295,7 +1295,7 @@ H5C__json_write_set_cache_config_log_msg(void *udata, const H5AC_cache_config_t
HDassert(config);
/* Create the log message string */
- HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
+ HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
"\
{\
\"timestamp\":%lld,\
@@ -1342,7 +1342,7 @@ H5C__json_write_remove_entry_log_msg(void *udata, const H5C_cache_entry_t *entry
HDassert(entry);
/* Create the log message string */
- HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
+ HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE,
"\
{\
\"timestamp\":%lld,\
@@ -1351,7 +1351,7 @@ H5C__json_write_remove_entry_log_msg(void *udata, const H5C_cache_entry_t *entry
\"returned\":%d\
},\n\
"
- , (long long)HDtime(NULL), (unsigned long)entry->addr,
+ , (long long)HDtime(NULL), (unsigned long)entry->addr,
(int)fxn_ret_value);
/* Write the log message to the file */
diff --git a/src/H5Clog_trace.c b/src/H5Clog_trace.c
index 7c1305c..118d03d 100644
--- a/src/H5Clog_trace.c
+++ b/src/H5Clog_trace.c
@@ -138,7 +138,7 @@ static H5C_log_class_t H5C_trace_log_class_g = {
/*-------------------------------------------------------------------------
* Function: H5C__trace_write_log_message
*
- * Purpose: Write a message to the log file and flush the file.
+ * Purpose: Write a message to the log file and flush the file.
* The message string is neither modified nor freed.
*
* Return: SUCCEED/FAIL
@@ -166,7 +166,7 @@ H5C__trace_write_log_message(H5C_log_trace_udata_t *trace_udata)
if((int)n_chars != HDfprintf(trace_udata->outfile, trace_udata->message))
HGOTO_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "error writing log message")
HDmemset((void *)(trace_udata->message), 0, (size_t)(n_chars * sizeof(char)));
-
+
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__trace_write_log_message() */
@@ -217,7 +217,7 @@ H5C_log_trace_set_up(H5C_log_info_t *log_info, const char log_location[], int mp
if(NULL == (log_info->udata = H5MM_calloc(sizeof(H5C_log_trace_udata_t))))
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed")
trace_udata = (H5C_log_trace_udata_t *)(log_info->udata);
-
+
/* Allocate memory for the message buffer */
if(NULL == (trace_udata->message = (char *)H5MM_calloc(H5C_MAX_TRACE_LOG_MSG_SIZE * sizeof(char))))
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed")
@@ -340,7 +340,7 @@ H5C__trace_write_expunge_entry_log_msg(void *udata, haddr_t address,
HDassert(trace_udata->message);
/* Create the log message string */
- HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_expunge_entry 0x%lx %d %d\n",
+ HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_expunge_entry 0x%lx %d %d\n",
(unsigned long)address, type_id, (int)fxn_ret_value);
/* Write the log message to the file */
@@ -377,7 +377,7 @@ H5C__trace_write_flush_cache_log_msg(void *udata, herr_t fxn_ret_value)
HDassert(trace_udata->message);
/* Create the log message string */
- HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_flush %d\n",
+ HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_flush %d\n",
(int)fxn_ret_value);
/* Write the log message to the file */
@@ -415,7 +415,7 @@ H5C__trace_write_insert_entry_log_msg(void *udata, haddr_t address,
HDassert(trace_udata->message);
/* Create the log message string */
- HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_insert_entry 0x%lx %d 0x%x %d %d\n",
+ HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_insert_entry 0x%lx %d 0x%x %d %d\n",
(unsigned long)address, type_id, flags, (int)size, (int)fxn_ret_value);
/* Write the log message to the file */
@@ -454,7 +454,7 @@ H5C__trace_write_mark_entry_dirty_log_msg(void *udata, const H5C_cache_entry_t *
HDassert(entry);
/* Create the log message string */
- HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_mark_entry_dirty 0x%lx %d\n",
+ HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_mark_entry_dirty 0x%lx %d\n",
(unsigned long)(entry->addr), (int)fxn_ret_value);
/* Write the log message to the file */
@@ -493,7 +493,7 @@ H5C__trace_write_mark_entry_clean_log_msg(void *udata, const H5C_cache_entry_t *
HDassert(entry);
/* Create the log message string */
- HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_mark_entry_clean 0x%lx %d\n",
+ HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_mark_entry_clean 0x%lx %d\n",
(unsigned long)(entry->addr), (int)fxn_ret_value);
/* Write the log message to the file */
@@ -532,7 +532,7 @@ H5C__trace_write_mark_unserialized_entry_log_msg(void *udata,
HDassert(entry);
/* Create the log message string */
- HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_mark_entry_unserialized 0x%lx %d\n",
+ HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_mark_entry_unserialized 0x%lx %d\n",
(unsigned long)(entry->addr), (int)fxn_ret_value);
/* Write the log message to the file */
@@ -571,7 +571,7 @@ H5C__trace_write_mark_serialized_entry_log_msg(void *udata, const H5C_cache_entr
HDassert(entry);
/* Create the log message string */
- HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_mark_entry_serialized 0x%lx %d\n",
+ HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_mark_entry_serialized 0x%lx %d\n",
(unsigned long)(entry->addr), (int)fxn_ret_value);
/* Write the log message to the file */
@@ -609,7 +609,7 @@ H5C__trace_write_move_entry_log_msg(void *udata, haddr_t old_addr, haddr_t new_a
HDassert(trace_udata->message);
/* Create the log message string */
- HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_move_entry 0x%lx 0x%lx %d %d\n",
+ HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_move_entry 0x%lx 0x%lx %d %d\n",
(unsigned long)old_addr, (unsigned long)new_addr, type_id, (int)fxn_ret_value);
/* Write the log message to the file */
@@ -648,7 +648,7 @@ H5C__trace_write_pin_entry_log_msg(void *udata, const H5C_cache_entry_t *entry,
HDassert(entry);
/* Create the log message string */
- HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_pin_protected_entry 0x%lx %d\n",
+ HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_pin_protected_entry 0x%lx %d\n",
(unsigned long)(entry->addr), (int)fxn_ret_value);
/* Write the log message to the file */
@@ -689,7 +689,7 @@ H5C__trace_write_create_fd_log_msg(void *udata, const H5C_cache_entry_t *parent,
HDassert(child);
/* Create the log message string */
- HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_create_flush_dependency 0x%lx 0x%lx %d\n",
+ HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_create_flush_dependency 0x%lx 0x%lx %d\n",
(unsigned long)(parent->addr), (unsigned long)(child->addr), (int)fxn_ret_value);
/* Write the log message to the file */
@@ -728,7 +728,7 @@ H5C__trace_write_protect_entry_log_msg(void *udata, const H5C_cache_entry_t *ent
HDassert(entry);
/* Create the log message string */
- HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_protect 0x%lx %d 0x%x %d %d\n",
+ HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_protect 0x%lx %d 0x%x %d %d\n",
(unsigned long)(entry->addr), type_id, flags, (int)(entry->size), (int)fxn_ret_value);
/* Write the log message to the file */
@@ -767,7 +767,7 @@ H5C__trace_write_resize_entry_log_msg(void *udata, const H5C_cache_entry_t *entr
HDassert(entry);
/* Create the log message string */
- HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_resize_entry 0x%lx %d %d\n",
+ HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_resize_entry 0x%lx %d %d\n",
(unsigned long)(entry->addr), (int)new_size, (int)fxn_ret_value);
/* Write the log message to the file */
@@ -806,7 +806,7 @@ H5C__trace_write_unpin_entry_log_msg(void *udata, const H5C_cache_entry_t *entry
HDassert(entry);
/* Create the log message string */
- HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_unpin_entry 0x%lx %d\n",
+ HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_unpin_entry 0x%lx %d\n",
(unsigned long)(entry->addr), (int)fxn_ret_value);
/* Write the log message to the file */
@@ -847,7 +847,7 @@ H5C__trace_write_destroy_fd_log_msg(void *udata, const H5C_cache_entry_t *parent
HDassert(child);
/* Create the log message string */
- HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_destroy_flush_dependency 0x%lx 0x%lx %d\n",
+ HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_destroy_flush_dependency 0x%lx 0x%lx %d\n",
(unsigned long)(parent->addr), (unsigned long)(child->addr), (int)fxn_ret_value);
/* Write the log message to the file */
@@ -885,7 +885,7 @@ H5C__trace_write_unprotect_entry_log_msg(void *udata, haddr_t address,
HDassert(trace_udata->message);
/* Create the log message string */
- HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_unprotect 0x%lx %d 0x%x %d\n",
+ HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_unprotect 0x%lx %d 0x%x %d\n",
(unsigned long)(address), type_id, flags, (int)fxn_ret_value);
/* Write the log message to the file */
@@ -994,7 +994,7 @@ H5C__trace_write_remove_entry_log_msg(void *udata, const H5C_cache_entry_t *entr
HDassert(entry);
/* Create the log message string */
- HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_remove_entry 0x%lx %d\n",
+ HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_remove_entry 0x%lx %d\n",
(unsigned long)(entry->addr), (int)fxn_ret_value);
/* Write the log message to the file */
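Unlike the JSON logger, the trace logger emits one plain-text line per H5AC call. Based on the format strings above, a fragment of trace output would look like this (values illustrative):

    H5AC_protect 0x2d0 2 0x0 96 0
    H5AC_mark_entry_dirty 0x2d0 0
    H5AC_unprotect 0x2d0 2 0x4 0
    H5AC_flush 0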
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index b8648f0..779f289 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -149,8 +149,8 @@
*
* JRM - 9/8/05
*
- * - Added macros supporting the index list -- a doubly liked list of
- * all entries in the index. This list is necessary to reduce the
+ * - Added macros supporting the index list -- a doubly linked list of
+ * all entries in the index. This list is necessary to reduce the
* cost of visiting all entries in the cache, which was previously
* done via a scan of the hash table.
*
@@ -963,16 +963,16 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
*
* JRM -- 11/5/08
*
- * - Updated existing index macros and sanity check macros to maintain
+ * - Updated existing index macros and sanity check macros to maintain
* the index_ring_len, index_ring_size, clean_index_ring_size, and
* dirty_index_ring_size fields of H5C_t.
*
* JRM -- 9/1/15
*
- * - Updated existing index macros and sanity checks macros to
+ * - Updated existing index macros and sanity check macros to
 * maintain a doubly linked list of all entries in the index.
* This is necessary to reduce the computational cost of visiting
- * all entries in the index, which used to be done by scanning
+ * all entries in the index, which used to be done by scanning
* the hash table.
*
* JRM -- 10/15/15
@@ -1561,7 +1561,7 @@ if ( ( (cache_ptr)->index_size != \
* flush.
*
* JRM -- 12/13/14
- * Added code to set cache_ptr->slist_changed to TRUE
+ * Added code to set cache_ptr->slist_changed to TRUE
* when an entry is inserted in the slist.
*
* JRM -- 9/1/15
@@ -1762,7 +1762,7 @@ if ( ( (cache_ptr)->index_size != \
* flush.
*
* JRM -- 12/13/14
- * Note that we do not set cache_ptr->slist_changed to TRUE
+ * Note that we do not set cache_ptr->slist_changed to TRUE
* in this case, as the structure of the slist is not
* modified.
*
@@ -2243,14 +2243,14 @@ if ( ( (cache_ptr)->index_size != \
* Macro: H5C__UPDATE_RP_FOR_INSERT_APPEND
*
* Purpose: Update the replacement policy data structures for an
- * insertion of the specified cache entry.
+ * insertion of the specified cache entry.
*
- * Unlike H5C__UPDATE_RP_FOR_INSERTION below, mark the
- * new entry as the LEAST recently used entry, not the
- * most recently used.
+ * Unlike H5C__UPDATE_RP_FOR_INSERTION below, mark the
+ * new entry as the LEAST recently used entry, not the
+ * most recently used.
*
- * For now at least, this macro should only be used in
- * the reconstruction of the metadata cache from a cache
+ * For now at least, this macro should only be used in
+ * the reconstruction of the metadata cache from a cache
* image block.
*
* At present, we only support the modified LRU policy, so
@@ -3491,7 +3491,7 @@ typedef struct H5C_tag_info_t {
*
* JRM - 9/26/05
*
- * magic: Unsigned 32 bit integer always set to H5C__H5C_T_MAGIC.
+ * magic: Unsigned 32 bit integer always set to H5C__H5C_T_MAGIC.
* This field is used to validate pointers to instances of
* H5C_t.
*
@@ -3544,7 +3544,7 @@ typedef struct H5C_tag_info_t {
* clean data so as to avoid case b) above. Again, this is
* a soft limit.
*
- * close_warning_received: Boolean flag indicating that a file closing
+ * close_warning_received: Boolean flag indicating that a file closing
* warning has been received.
*
*
@@ -3576,7 +3576,7 @@ typedef struct H5C_tag_info_t {
* this flag is set to FALSE, the metadata cache will not
* attempt to evict entries to make space for newly protected
 * entries, and instead the cache will grow without limit.
- *
+ *
* Needless to say, this feature must be used with care.
*
*
@@ -3586,12 +3586,12 @@ typedef struct H5C_tag_info_t {
* Addendum: JRM -- 10/14/15
*
* We sometimes need to visit all entries in the cache. In the past, this
- * was done by scanning the hash table. However, this is expensive, and
- * we have come to scan the hash table often enough that it has become a
- * performance issue. To repair this, I have added code to maintain a
- * list of all entries in the index -- call this list the index list.
+ * was done by scanning the hash table. However, this is expensive, and
+ * we have come to scan the hash table often enough that it has become a
+ * performance issue. To repair this, I have added code to maintain a
+ * list of all entries in the index -- call this list the index list.
*
- * The index list is maintained by the same macros that maintain the
+ * The index list is maintained by the same macros that maintain the
* index, and must have the same length and size as the index proper.
*
* index_len: Number of entries currently in the hash table used to index
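A full-cache traversal over the index list is then an ordinary doubly-linked-list walk (a sketch, using the il_head/il_next fields documented further below):

    H5C_cache_entry_t *scan_ptr = cache_ptr->il_head;

    while(scan_ptr != NULL) {
        /* ... visit *scan_ptr ... */
        scan_ptr = scan_ptr->il_next;
    }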
@@ -3606,12 +3606,12 @@ typedef struct H5C_tag_info_t {
* index_size by three should yield a conservative estimate
* of the cache's memory footprint.
*
- * index_ring_len: Array of integer of length H5C_RING_NTYPES used to
- * maintain a count of entries in the index by ring. Note
- * that the sum of all the cells in this array must equal
+ * index_ring_len: Array of integer of length H5C_RING_NTYPES used to
+ * maintain a count of entries in the index by ring. Note
+ * that the sum of all the cells in this array must equal
* the value stored in index_len above.
*
- * index_ring_size: Array of size_t of length H5C_RING_NTYPES used to
+ * index_ring_size: Array of size_t of length H5C_RING_NTYPES used to
* maintain the sum of the sizes of all entries in the index
* by ring. Note that the sum of all cells in this array must
* equal the value stored in index_size above.
@@ -3631,8 +3631,8 @@ typedef struct H5C_tag_info_t {
* in the cache.
*
* clean_index_ring_size: Array of size_t of length H5C_RING_NTYPES used to
- * maintain the sum of the sizes of all clean entries in the
- * index by ring. Note that the sum of all cells in this array
+ * maintain the sum of the sizes of all clean entries in the
+ * index by ring. Note that the sum of all cells in this array
* must equal the value stored in clean_index_size above.
*
* dirty_index_size: Number of bytes of dirty entries currently stored in
@@ -3642,8 +3642,8 @@ typedef struct H5C_tag_info_t {
* dirty_index_size == index_size.
*
* dirty_index_ring_size: Array of size_t of length H5C_RING_NTYPES used to
- * maintain the sum of the sizes of all dirty entries in the
- * index by ring. Note that the sum of all cells in this array
+ * maintain the sum of the sizes of all dirty entries in the
+ * index by ring. Note that the sum of all cells in this array
* must equal the value stored in dirty_index_size above.
*
* index: Array of pointer to H5C_cache_entry_t of size
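The bookkeeping rules above amount to invariants that could be asserted directly (a sketch, per the definitions of the clean/dirty sizes and the index list fields; the sanity-check macros in this header do this more thoroughly):

    HDassert(cache_ptr->clean_index_size + cache_ptr->dirty_index_size ==
             cache_ptr->index_size);
    HDassert(cache_ptr->il_len  == cache_ptr->index_len);
    HDassert(cache_ptr->il_size == cache_ptr->index_size);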
@@ -3661,52 +3661,52 @@ typedef struct H5C_tag_info_t {
* changing the H5C__HASH_FCN macro and the deletion of the
* H5C__HASH_MASK #define. No other changes should be required.
*
- * il_len: Number of entries on the index list.
+ * il_len: Number of entries on the index list.
*
- * This must always be equal to index_len. As such, this
- * field is redundant. However, the existing linked list
- * management macros expect to maintain a length field, so
+ * This must always be equal to index_len. As such, this
+ * field is redundant. However, the existing linked list
+ * management macros expect to maintain a length field, so
* this field exists primarily to avoid adding complexity to
* these macros.
*
* il_size: Number of bytes of cache entries currently stored in the
* index list.
*
- * This must always be equal to index_size. As such, this
- * field is redundant. However, the existing linked list
- * management macros expect to maintain a size field, so
+ * This must always be equal to index_size. As such, this
+ * field is redundant. However, the existing linked list
+ * management macros expect to maintain a size field, so
* this field exists primarily to avoid adding complexity to
* these macros.
*
* il_head: Pointer to the head of the doubly linked list of entries in
- * the index list. Note that cache entries on this list are
+ * the index list. Note that cache entries on this list are
* linked by their il_next and il_prev fields.
*
* This field is NULL if the index is empty.
*
* il_tail: Pointer to the tail of the doubly linked list of entries in
- * the index list. Note that cache entries on this list are
+ * the index list. Note that cache entries on this list are
* linked by their il_next and il_prev fields.
*
* This field is NULL if the index is empty.
*
*
- * With the addition of the take ownership flag, it is possible that
- * an entry may be removed from the cache as the result of the flush of
- * a second entry. In general, this causes little trouble, but it is
- * possible that the entry removed may be the next entry in the scan of
- * a list. In this case, we must be able to detect the fact that the
+ * With the addition of the take ownership flag, it is possible that
+ * an entry may be removed from the cache as the result of the flush of
+ * a second entry. In general, this causes little trouble, but it is
+ * possible that the entry removed may be the next entry in the scan of
+ * a list. In this case, we must be able to detect the fact that the
* entry has been removed, so that the scan doesn't attempt to proceed with
* an entry that is no longer in the cache.
*
* The following fields are maintained to facilitate this.
*
* entries_removed_counter: Counter that is incremented each time an
- * entry is removed from the cache by any means (eviction,
+ * entry is removed from the cache by any means (eviction,
* expungement, or take ownership at this point in time).
* Functions that perform scans on lists may set this field
- * to zero prior to calling H5C__flush_single_entry().
- * Unexpected changes to the counter indicate that an entry
+ * to zero prior to calling H5C__flush_single_entry().
+ * Unexpected changes to the counter indicate that an entry
* was removed from the cache as a side effect of the flush.
*
* last_entry_removed_ptr: Pointer to the instance of H5C_cache_entry_t
@@ -3715,11 +3715,11 @@ typedef struct H5C_tag_info_t {
* performing a scan of a list has set this field to NULL prior
* to calling H5C__flush_single_entry().
*
- * WARNING!!! This field must NEVER be dereferenced. It is
+ * WARNING!!! This field must NEVER be dereferenced. It is
* maintained to allow functions that perform scans of lists
* to compare this pointer with their pointers to next, thus
* allowing them to avoid unnecessary restarts of scans if the
- * pointers don't match, and if entries_removed_counter is
+ * pointers don't match, and if entries_removed_counter is
* one.
*
* entry_watched_for_removal: Pointer to an instance of H5C_cache_entry_t
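On the caller's side, the protocol these fields support looks roughly like this (a sketch; next_entry_ptr and restart_scan are hypothetical locals of the scanning function, and the restart rule is paraphrased from the field descriptions):

    cache_ptr->entries_removed_counter = 0;
    cache_ptr->last_entry_removed_ptr  = NULL;

    if(H5C__flush_single_entry(f, entry_ptr, flags) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't flush entry")

    /* restart the scan if an entry was removed and it may have been next */
    if(cache_ptr->entries_removed_counter > 1 ||
            cache_ptr->last_entry_removed_ptr == next_entry_ptr)
        restart_scan = TRUE;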
@@ -3740,11 +3740,11 @@ typedef struct H5C_tag_info_t {
* are flushed. (this has been changed -- dirty entries are now removed from
* the skip list as they are flushed. JRM - 10/25/05)
*
- * slist_changed: Boolean flag used to indicate whether the contents of
+ * slist_changed: Boolean flag used to indicate whether the contents of
* the slist has changed since the last time this flag was
- * reset. This is used in the cache flush code to detect
+ * reset. This is used in the cache flush code to detect
* conditions in which pre-serialize or serialize callbacks
- * have modified the slist -- which obliges us to restart
+ * have modified the slist -- which obliges us to restart
* the scan of the slist from the beginning.
*
* slist_len: Number of entries currently in the skip list
@@ -3755,14 +3755,14 @@ typedef struct H5C_tag_info_t {
* skip list used to maintain a sorted list of
* dirty entries in the cache.
*
- * slist_ring_len: Array of integer of length H5C_RING_NTYPES used to
- * maintain a count of entries in the slist by ring. Note
- * that the sum of all the cells in this array must equal
+ * slist_ring_len: Array of integer of length H5C_RING_NTYPES used to
+ * maintain a count of entries in the slist by ring. Note
+ * that the sum of all the cells in this array must equal
* the value stored in slist_len above.
*
* slist_ring_size: Array of size_t of length H5C_RING_NTYPES used to
- * maintain the sum of the sizes of all entries in the
- * slist by ring. Note that the sum of all cells in this
+ * maintain the sum of the sizes of all entries in the
+ * slist by ring. Note that the sum of all cells in this
* array must equal the value stored in slist_size above.
*
 * slist_ptr: pointer to the instance of H5SL_t used to maintain a sorted
@@ -3826,7 +3826,7 @@ typedef struct H5C_tag_info_t {
* num_objs_corked: Unsigned integer field containing the number of objects
* that are "corked". The "corked" status of an object is
* found by searching the "tag_list". This field is added
- * for optimization so that the skip list search on "tag_list"
+ * for optimization so that the skip list search on "tag_list"
* can be skipped if this field is zero, i.e. no "corked"
* objects.
*
@@ -3867,7 +3867,7 @@ typedef struct H5C_tag_info_t {
*
* 2) A pinned entry can be accessed or modified at any time.
* This places an additional burden on the associated pre-serialize
- * and serialize callbacks, which must ensure the the entry is in
+ * and serialize callbacks, which must ensure that the entry is in
* a consistent state before creating an image of it.
*
* 3) A pinned entry can be marked as dirty (and possibly
@@ -3878,7 +3878,7 @@ typedef struct H5C_tag_info_t {
* flush.
*
* Since pinned entries cannot be evicted, they must be kept on a pinned
- * entry list (pel), instead of being entrusted to the replacement policy
+ * entry list (pel), instead of being entrusted to the replacement policy
* code.
*
* Maintaining the pinned entry list requires the following fields:
@@ -3907,7 +3907,7 @@ typedef struct H5C_tag_info_t {
*
* While there has been interest in several replacement policies for
* this cache, the initial development schedule is tight. Thus I have
- * elected to support only a modified LRU (least recently used) policy
+ * elected to support only a modified LRU (least recently used) policy
* for the first cut.
*
* To further simplify matters, I have simply included the fields needed
@@ -3926,7 +3926,7 @@ typedef struct H5C_tag_info_t {
* be collective and the other processes will not know to participate.
*
* To deal with this issue, I have modified the usual LRU policy by adding
- * clean and dirty LRU lists to the usual LRU list. In general, these
+ * clean and dirty LRU lists to the usual LRU list. In general, these
 * lists only exist in parallel builds.
*
* The clean LRU list is simply the regular LRU list with all dirty cache
@@ -3954,13 +3954,13 @@ typedef struct H5C_tag_info_t {
*
* LRU_list_len: Number of cache entries currently on the LRU list.
*
- * Observe that LRU_list_len + pl_len + pel_len must always
+ * Observe that LRU_list_len + pl_len + pel_len must always
* equal index_len.
*
* LRU_list_size: Number of bytes of cache entries currently residing on the
* LRU list.
*
- * Observe that LRU_list_size + pl_size + pel_size must always
+ * Observe that LRU_list_size + pl_size + pel_size must always
* equal index_size.
*
* LRU_head_ptr: Pointer to the head of the doubly linked LRU list. Cache
@@ -4071,25 +4071,25 @@ typedef struct H5C_tag_info_t {
* size is decreased. The flag triggers a call to
* H5C__make_space_in_cache() on the next call to H5C_protect().
*
- * resize_in_progress: As the metadata cache has become re-entrant, it is
- * possible that a protect may trigger a call to
+ * resize_in_progress: As the metadata cache has become re-entrant, it is
+ * possible that a protect may trigger a call to
* H5C__auto_adjust_cache_size(), which may trigger a flush,
- * which may trigger a protect, which will result in another
- * call to H5C__auto_adjust_cache_size().
+ * which may trigger a protect, which will result in another
+ * call to H5C__auto_adjust_cache_size().
*
* The resize_in_progress boolean flag is used to detect this,
* and to prevent the infinite recursion that would otherwise
* occur.
*
- * Note that this issue is not hypothetical -- this field
- * was added 12/29/15 to fix a bug exposed in the testing
+ * Note that this issue is not hypothetical -- this field
+ * was added 12/29/15 to fix a bug exposed in the testing
* of changes to the file driver info superblock extension
* management code needed to support rings.
*
* msic_in_progress: As the metadata cache has become re-entrant, and as
- * the free space manager code has become more tightly
- * integrated with the metadata cache, it is possible that
- * a call to H5C_insert_entry() may trigger a call to
+ * the free space manager code has become more tightly
+ * integrated with the metadata cache, it is possible that
+ * a call to H5C_insert_entry() may trigger a call to
* H5C_make_space_in_cache(), which, via H5C__flush_single_entry()
* and client callbacks, may trigger an infinite regression
* of calls to H5C_make_space_in_cache().
@@ -4098,9 +4098,9 @@ typedef struct H5C_tag_info_t {
* and prevent the infinite regression that would otherwise
* occur.
*
- * Note that this is issue is not hypothetical -- this field
- * was added 2/16/17 to address this issue when it was
- * exposed by modifications to test/fheap.c to cause it to
+ * Note that this issue is not hypothetical -- this field
+ * was added 2/16/17 to address this issue when it was
+ * exposed by modifications to test/fheap.c to cause it to
* use paged allocation.
*
* resize_ctl: Instance of H5C_auto_size_ctl_t containing configuration
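Both re-entrancy guards follow the same simple pattern (a sketch for msic_in_progress; resize_in_progress is analogous):

    if(cache_ptr->msic_in_progress)     /* re-entrant call -- do nothing */
        HGOTO_DONE(SUCCEED)

    cache_ptr->msic_in_progress = TRUE;

    /* ... flush / evict entries to make space ... */

    cache_ptr->msic_in_progress = FALSE;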
@@ -4184,45 +4184,45 @@ typedef struct H5C_tag_info_t {
* data for generation of a cache image on file close.
*
* serialization_in_progress: Boolean field that is set to TRUE iff
- * the cache is in the process of being serialized. This
+ * the cache is in the process of being serialized. This
* field is needed to support the H5C_serialization_in_progress()
* call, which is in turn required for sanity checks in some
* cache clients.
*
- * load_image: Boolean flag indicating that the metadata cache image
- * superblock extension message exists and should be
+ * load_image: Boolean flag indicating that the metadata cache image
+ * superblock extension message exists and should be
* read, and the image block read and decoded on the next
- * call to H5C_protect().
+ * call to H5C_protect().
*
- * image_loaded: Boolean flag indicating that the metadata cache has
- * loaded the metadata cache image as directed by the
+ * image_loaded: Boolean flag indicating that the metadata cache has
+ * loaded the metadata cache image as directed by the
* MDC cache image superblock extension message.
*
* delete_image: Boolean flag indicating whether the metadata cache image
* superblock message should be deleted and the cache image
* file space freed after they have been read and decoded.
*
- * This flag should be set to TRUE iff the file is opened
+ * This flag should be set to TRUE iff the file is opened
* R/W and there is a cache image to be read.
*
- * image_addr: haddr_t containing the base address of the on disk
- * metadata cache image, or HADDR_UNDEF if that value is
- * undefined. Note that this field is used both in the
- * construction and write, and the read and decode of
+ * image_addr: haddr_t containing the base address of the on disk
+ * metadata cache image, or HADDR_UNDEF if that value is
+ * undefined. Note that this field is used both in the
+ * construction and write, and the read and decode of
* metadata cache image blocks.
*
- * image_len: hsize_t containing the size of the on disk metadata cache
- * image, or zero if that value is undefined. Note that this
- * field is used both in the construction and write, and the
+ * image_len: hsize_t containing the size of the on disk metadata cache
+ * image, or zero if that value is undefined. Note that this
+ * field is used both in the construction and write, and the
* read and decode of metadata cache image blocks.
*
- * image_data_len: size_t containing the number of bytes of data in the
- * on disk metadata cache image, or zero if that value is
+ * image_data_len: size_t containing the number of bytes of data in the
+ * on disk metadata cache image, or zero if that value is
* undefined.
*
* In most cases, this value is the same as the image_len
* above. It exists to allow for metadata cache image blocks
- * that are larger than the actual image. Thus in all
+ * that are larger than the actual image. Thus in all
* cases image_data_len <= image_len.
*
* To create the metadata cache image, we must first serialize all the
@@ -4232,27 +4232,27 @@ typedef struct H5C_tag_info_t {
* height in increasing order.
*
 * This operation is complicated by the fact that entries other than the
- * target may be inserted, loaded, relocated, or removed from the cache
- * (either by eviction or the take ownership flag) as the result of a
- * pre_serialize or serialize callback. While entry removals are not
+ * target may be inserted, loaded, relocated, or removed from the cache
+ * (either by eviction or the take ownership flag) as the result of a
+ * pre_serialize or serialize callback. While entry removals are not
* a problem for the scan of the index, insertions, loads, and relocations
- * are. Hence the entries loaded, inserted, and relocated counters
- * listed below have been implemented to allow these conditions to be
+ * are. Hence the entries loaded, inserted, and relocated counters
+ * listed below have been implemented to allow these conditions to be
* detected and dealt with by restarting the scan.
*
- * The serialization operation is further complicated by the fact that
- * the flush dependency height of a given entry may increase (as the
- * result of an entry load or insert) or decrease (as the result of an
+ * The serialization operation is further complicated by the fact that
+ * the flush dependency height of a given entry may increase (as the
+ * result of an entry load or insert) or decrease (as the result of an
* entry removal -- via either eviction or the take ownership flag). The
* entry_fd_height_change_counter field is maintained to allow detection
* of this condition, and a restart of the scan when it occurs.
*
* Note that all these new fields would work just as well as booleans.
*
- * entries_loaded_counter: Number of entries loaded into the cache
+ * entries_loaded_counter: Number of entries loaded into the cache
* since the last time this field was reset.
*
- * entries_inserted_counter: Number of entries inserted into the cache
+ * entries_inserted_counter: Number of entries inserted into the cache
* since the last time this field was reset.
*
 * entries_relocated_counter: Number of entries whose base address has
@@ -4261,32 +4261,32 @@ typedef struct H5C_tag_info_t {
* entry_fd_height_change_counter: Number of entries whose flush dependency
* height has changed since the last time this field was reset.
*
- * The following fields are used assemble the cache image prior to
+ * The following fields are used to assemble the cache image prior to
* writing it to disk.
*
* num_entries_in_image: Unsigned integer field containing the number of entries
- * to be copied into the metadata cache image. Note that
- * this value will be less than the number of entries in
- * the cache, and the superblock and its related entries
+ * to be copied into the metadata cache image. Note that
+ * this value will be less than the number of entries in
+ * the cache, as the superblock and its related entries
* are not written to the metadata cache image.
*
* image_entries: Pointer to a dynamically allocated array of instance of
* H5C_image_entry_t of length num_entries_in_image, or NULL
* if that array does not exist. This array is used to
- * assemble entry data to be included in the image, and to
+ * assemble entry data to be included in the image, and to
* sort them by flush dependency height and LRU rank.
- *
+ *
* image_buffer: Pointer to the dynamically allocated buffer of length
- * image_len in which the metadata cache image is assembled,
+ * image_len in which the metadata cache image is assembled,
* or NULL if that buffer does not exist.
*
*
* Free Space Manager Related fields:
*
- * The free space managers must be informed when we are about to close
+ * The free space managers must be informed when we are about to close
* or flush the file so that they order themselves accordingly. This used
- * to be done much later in the close process, but with cache image and
- * page buffering, this is no longer viable, as we must finalize the on
+ * to be done much later in the close process, but with cache image and
+ * page buffering, this is no longer viable, as we must finalize the on
* disk image of all metadata much sooner.
*
* This is handled by the H5MF_settle_raw_data_fsm() and
@@ -4300,11 +4300,11 @@ typedef struct H5C_tag_info_t {
* flush is complete.
*
* rdfsm_settled: Boolean flag indicating whether the raw data free space
- * manager is settled -- i.e. whether the correct space has
+ * manager is settled -- i.e. whether the correct space has
* been allocated for it in the file.
*
- * Note that the name of this field is deceptive. In the
- * multi file case, the flag applies to all free space
+ * Note that the name of this field is deceptive. In the
+ * multi file case, the flag applies to all free space
* managers that are not involved in allocating space for
* free space manager metadata.
*
@@ -4312,9 +4312,9 @@ typedef struct H5C_tag_info_t {
* manager is settled -- i.e. whether the correct space has
* been allocated for it in the file.
*
- * Note that the name of this field is deceptive. In the
- * multi file case, the flag applies only to free space
- * managers that are involved in allocating space for free
+ * Note that the name of this field is deceptive. In the
+ * multi file case, the flag applies only to free space
+ * managers that are involved in allocating space for free
* space managers.
*
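A sketch of how these flags gate the settle calls (function names per the text above; the exact call sites and signatures in the flush code may differ):

    if(!cache_ptr->rdfsm_settled) {
        hbool_t fsm_settled = FALSE;

        if(H5MF_settle_raw_data_fsm(f, &fsm_settled) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't settle raw data FSM")
        if(fsm_settled)
            cache_ptr->rdfsm_settled = TRUE;
    }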
*
@@ -4358,7 +4358,7 @@ typedef struct H5C_tag_info_t {
* id equal to the array index has been inserted into the
* cache in the current epoch.
*
- * pinned_insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * pinned_insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
* The cells are used to record the number of times an entry
* with type id equal to the array index has been inserted
* pinned into the cache in the current epoch.
@@ -4378,9 +4378,9 @@ typedef struct H5C_tag_info_t {
* equal to the array index has been evicted from the cache in
* the current epoch.
*
- * take_ownerships: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
- * cells are used to record the number of times an entry with
- * type id equal to the array index has been removed from the
+ * take_ownerships: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
+ * cells are used to record the number of times an entry with
+ * type id equal to the array index has been removed from the
* cache via the H5C__TAKE_OWNERSHIP_FLAG in the current epoch.
*
* moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
@@ -4388,12 +4388,12 @@ typedef struct H5C_tag_info_t {
* id equal to the array index has been moved in the current
* epoch.
*
- * entry_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * entry_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
* The cells are used to record the number of times an entry
* with type id equal to the array index has been moved
* during its pre-serialize callback in the current epoch.
*
- * cache_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * cache_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
* The cells are used to record the number of times an entry
* with type id equal to the array index has been moved
* during a cache flush in the current epoch.
@@ -4436,7 +4436,7 @@ typedef struct H5C_tag_info_t {
* entry_flush_size_changes: Array of int64 of length
* H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record
* the number of times an entry with type id equal to the
- * array index has changed size while in its pre-serialize
+ * array index has changed size while in its pre-serialize
* callback.
*
* cache_flush_size_changes: Array of int64 of length
@@ -4500,7 +4500,7 @@ typedef struct H5C_tag_info_t {
* enforcing the min_clean_fraction in H5C__make_space_in_cache().
*
* total_dirty_pf_entries_skipped_in_msic: Number of dirty prefetched entries
- * skipped in H5C__make_space_in_cache(). Note that this can
+ * skipped in H5C__make_space_in_cache(). Note that this can
* only occur when a file is opened R/O with a cache image
* containing dirty entries.
*
@@ -4512,7 +4512,7 @@ typedef struct H5C_tag_info_t {
*
* max_dirty_pf_entries_skipped_in_msic: Maximum number of dirty prefetched
* entries skipped in any one call to H5C__make_space_in_cache().
- * Note that this can only occur when the file is opened
+ * Note that this can only occur when the file is opened
* R/O with a cache image containing dirty entries.
*
* max_entries_scanned_in_msic: Maximum number of entries scanned over
@@ -4522,24 +4522,24 @@ typedef struct H5C_tag_info_t {
* for entries to evict in order to make space in cache.
*
*
- * The following fields track statistics on cache images.
+ * The following fields track statistics on cache images.
*
* images_created: Integer field containing the number of cache images
- * created since the last time statistics were reset.
+ * created since the last time statistics were reset.
*
* At present, this field must always be either 0 or 1.
- * Further, since cache images are only created at file
+ * Further, since cache images are only created at file
* close, this field should only be set at that time.
*
- * images_read: Integer field containing the number of cache images
+ * images_read: Integer field containing the number of cache images
* read from file. Note that reading an image is different
* from loading it -- reading the image means just that,
* while loading the image refers to decoding it and loading
* it into the metadata cache.
*
- * In the serial case, image_read should always equal
- * images_loaded. However, in the parallel case, the
- * image should only be read by process 0. All other
+ * In the serial case, images_read should always equal
+ * images_loaded. However, in the parallel case, the
+ * image should only be read by process 0. All other
* processes should receive the cache image via a broadcast
* from process 0.
*
@@ -4547,25 +4547,25 @@ typedef struct H5C_tag_info_t {
* loaded since the last time statistics were reset.
*
* At present, this field must always be either 0 or 1.
- * Further, since cache images are only loaded at the
+ * Further, since cache images are only loaded at the
* time of the first protect or on file close, this value
* should only change on those events.
*
* last_image_size: Size of the most recently loaded metadata cache image
* loaded into the cache, or zero if no image has been
- * loaded.
+ * loaded.
*
- * At present, at most one cache image can be loaded into
+ * At present, at most one cache image can be loaded into
* the metadata cache for any given file, and this image
* will be loaded either on the first protect, or on file
* close if no entry is protected before then.
*
*
* Fields for tracking prefetched entries. Note that flushes and evictions
- * of prefetched entries are tracked in the flushes and evictions arrays
+ * of prefetched entries are tracked in the flushes and evictions arrays
 * discussed above.
*
- * prefetches: Number of prefetched entries that are loaded to the
+ * prefetches: Number of prefetched entries that are loaded to the
* cache.
*
* dirty_prefetches: Number of dirty prefetched entries that are loaded
@@ -4573,30 +4573,30 @@ typedef struct H5C_tag_info_t {
*
* prefetch_hits: Number of prefetched entries that are actually used.
*
- *
- * As entries are now capable of moving, loading, dirtying, and deleting
- * other entries in their pre_serialize and serialize callbacks, it has
- * been necessary to insert code to restart scans of lists so as to avoid
- * improper behavior if the next entry in the list is the target of one on
+ *
+ * As entries are now capable of moving, loading, dirtying, and deleting
+ * other entries in their pre_serialize and serialize callbacks, it has
+ * been necessary to insert code to restart scans of lists so as to avoid
+ * improper behavior if the next entry in the list is the target of one of
* these operations.
*
- * The following fields are use to count such occurrences. They are used
- * both in tests (to verify that the scan has been restarted), and to
+ * The following fields are used to count such occurrences. They are used
+ * both in tests (to verify that the scan has been restarted), and to
* obtain estimates of how frequently these restarts occur.
*
* slist_scan_restarts: Number of times a scan of the slist (that contains
- * calls to H5C__flush_single_entry()) has been restarted to
- * avoid potential issues with change of status of the next
+ * calls to H5C__flush_single_entry()) has been restarted to
+ * avoid potential issues with change of status of the next
* entry in the scan.
*
* LRU_scan_restarts: Number of times a scan of the LRU list (that contains
- * calls to H5C__flush_single_entry()) has been restarted to
- * avoid potential issues with change of status of the next
+ * calls to H5C__flush_single_entry()) has been restarted to
+ * avoid potential issues with change of status of the next
* entry in the scan.
*
- * index_scan_restarts: Number of times a scan of the index has been
+ * index_scan_restarts: Number of times a scan of the index has been
* restarted to avoid potential issues with load, insertion
- * or change in flush dependency height of an entry other
+ * or change in flush dependency height of an entry other
* than the target entry as the result of call(s) to the
* pre_serialize or serialize callbacks.
*
@@ -4850,7 +4850,7 @@ struct H5C_t {
int32_t max_dirty_pf_entries_skipped_in_msic;
int32_t max_entries_scanned_in_msic;
int64_t entries_scanned_to_make_space;
-
+
/* Fields for tracking skip list scan restarts */
int64_t slist_scan_restarts;
int64_t LRU_scan_restarts;
diff --git a/src/H5Cprefetched.c b/src/H5Cprefetched.c
index 0befdf9..954dd60 100644
--- a/src/H5Cprefetched.c
+++ b/src/H5Cprefetched.c
@@ -66,7 +66,7 @@ static herr_t H5C__prefetched_entry_pre_serialize(H5F_t *f, void *thing,
unsigned *flags_ptr);
static herr_t H5C__prefetched_entry_serialize(const H5F_t *f, void *image_ptr,
size_t len, void *thing);
-static herr_t H5C__prefetched_entry_notify(H5C_notify_action_t action,
+static herr_t H5C__prefetched_entry_notify(H5C_notify_action_t action,
void *thing);
static herr_t H5C__prefetched_entry_free_icr(void *thing);
static herr_t H5C__prefetched_entry_fsf_size(const void *thing,
@@ -111,12 +111,12 @@ const H5AC_class_t H5AC_PREFETCHED_ENTRY[1] = {{
/***************************************************************************
- * With two exceptions, these functions should never be called, and thus
+ * With two exceptions, these functions should never be called, and thus
* there is little point in documenting them separately as they all simply
* throw an error.
*
* See header comments for the two exceptions (free_icr and notify).
- *
+ *
* JRM - 8/13/15
*
***************************************************************************/
@@ -157,8 +157,8 @@ H5C__prefetched_entry_verify_chksum(const void H5_ATTR_UNUSED *image_ptr,
static void *
-H5C__prefetched_entry_deserialize(const void H5_ATTR_UNUSED * image_ptr,
- size_t H5_ATTR_UNUSED len, void H5_ATTR_UNUSED * udata,
+H5C__prefetched_entry_deserialize(const void H5_ATTR_UNUSED * image_ptr,
+ size_t H5_ATTR_UNUSED len, void H5_ATTR_UNUSED * udata,
hbool_t H5_ATTR_UNUSED * dirty_ptr)
{
FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
@@ -182,9 +182,9 @@ H5C__prefetched_entry_image_len(const void H5_ATTR_UNUSED *thing,
static herr_t
-H5C__prefetched_entry_pre_serialize(H5F_t H5_ATTR_UNUSED *f, void H5_ATTR_UNUSED *thing,
+H5C__prefetched_entry_pre_serialize(H5F_t H5_ATTR_UNUSED *f, void H5_ATTR_UNUSED *thing,
haddr_t H5_ATTR_UNUSED addr, size_t H5_ATTR_UNUSED len,
- haddr_t H5_ATTR_UNUSED *new_addr_ptr, size_t H5_ATTR_UNUSED *new_len_ptr,
+ haddr_t H5_ATTR_UNUSED *new_addr_ptr, size_t H5_ATTR_UNUSED *new_len_ptr,
unsigned H5_ATTR_UNUSED *flags_ptr)
{
FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
@@ -196,7 +196,7 @@ H5C__prefetched_entry_pre_serialize(H5F_t H5_ATTR_UNUSED *f, void H5_ATTR_UNUSED
static herr_t
-H5C__prefetched_entry_serialize(const H5F_t H5_ATTR_UNUSED *f,
+H5C__prefetched_entry_serialize(const H5F_t H5_ATTR_UNUSED *f,
void H5_ATTR_UNUSED *image_ptr,
size_t H5_ATTR_UNUSED len, void H5_ATTR_UNUSED *thing)
{
@@ -211,7 +211,7 @@ H5C__prefetched_entry_serialize(const H5F_t H5_ATTR_UNUSED *f,
/*-------------------------------------------------------------------------
* Function: H5C__prefetched_entry_notify
*
- * Purpose: On H5AC_NOTIFY_ACTION_BEFORE_EVICT, check to see if the
+ * Purpose: On H5AC_NOTIFY_ACTION_BEFORE_EVICT, check to see if the
* target entry is a child in a flush dependency relationship.
* If it is, destroy that flush dependency relationship.
*
@@ -243,18 +243,18 @@ H5C__prefetched_entry_notify(H5C_notify_action_t action, void *_thing)
case H5C_NOTIFY_ACTION_AFTER_INSERT:
case H5C_NOTIFY_ACTION_AFTER_LOAD:
case H5C_NOTIFY_ACTION_AFTER_FLUSH:
- case H5C_NOTIFY_ACTION_ENTRY_DIRTIED:
- case H5C_NOTIFY_ACTION_ENTRY_CLEANED:
- case H5C_NOTIFY_ACTION_CHILD_DIRTIED:
- case H5C_NOTIFY_ACTION_CHILD_CLEANED:
- case H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED:
- case H5C_NOTIFY_ACTION_CHILD_SERIALIZED:
+ case H5C_NOTIFY_ACTION_ENTRY_DIRTIED:
+ case H5C_NOTIFY_ACTION_ENTRY_CLEANED:
+ case H5C_NOTIFY_ACTION_CHILD_DIRTIED:
+ case H5C_NOTIFY_ACTION_CHILD_CLEANED:
+ case H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED:
+ case H5C_NOTIFY_ACTION_CHILD_SERIALIZED:
/* do nothing */
break;
case H5C_NOTIFY_ACTION_BEFORE_EVICT:
for(u = 0; u < entry_ptr->flush_dep_nparents; u++) {
- H5C_cache_entry_t * parent_ptr;
+ H5C_cache_entry_t * parent_ptr;
/* Sanity checks */
HDassert(entry_ptr->flush_dep_parent);
@@ -268,9 +268,9 @@ H5C__prefetched_entry_notify(H5C_notify_action_t action, void *_thing)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "unable to destroy prefetched entry flush dependency")
if(parent_ptr->prefetched) {
- /* In prefetched entries, the fd_child_count field is
- * used in sanity checks elsewhere. Thus update this
- * field to reflect the destruction of the flush
+ /* In prefetched entries, the fd_child_count field is
+ * used in sanity checks elsewhere. Thus update this
+ * field to reflect the destruction of the flush
* dependency relationship.
*/
HDassert(parent_ptr->fd_child_count > 0);
@@ -335,8 +335,8 @@ done:
} /* end H5C__prefetched_entry_free_icr() */
-static herr_t
-H5C__prefetched_entry_fsf_size(const void H5_ATTR_UNUSED *thing,
+static herr_t
+H5C__prefetched_entry_fsf_size(const void H5_ATTR_UNUSED *thing,
hsize_t H5_ATTR_UNUSED *fsf_size_ptr)
{
FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index d4ed6fc..7f79452 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -150,7 +150,7 @@
*
* These flags apply to H5C_protect()
* H5C__READ_ONLY_FLAG
- * H5C__FLUSH_LAST_FLAG ; super block only
+ * H5C__FLUSH_LAST_FLAG ; super block only
* H5C__FLUSH_COLLECTIVELY_FLAG ; super block only
*
* These flags apply to H5C_unprotect():
@@ -213,8 +213,8 @@
#define H5C_DO_TAGGING_SANITY_CHECKS 1
#define H5C_DO_EXTREME_SANITY_CHECKS 0
#else /* NDEBUG */
-/* With rare execptions, the following defines should be set
- * to 0 if NDEBUG is defined
+/* With rare exceptions, the following defines should be set
+ * to 0 if NDEBUG is defined
*/
#define H5C_DO_SANITY_CHECKS 0
#define H5C_DO_SLIST_SANITY_CHECKS 0
@@ -292,50 +292,50 @@ typedef struct H5C_t H5C_t;
* H5C__CLASS_NO_FLAGS_SET: No special processing.
*
* H5C__CLASS_SPECULATIVE_LOAD_FLAG: This flag is used only in
- * H5C_load_entry(). When it is set, entries are
- * permitted to change their sizes on the first attempt
- * to load.
+ * H5C_load_entry(). When it is set, entries are
+ * permitted to change their sizes on the first attempt
+ * to load.
*
* If the new size is larger than the old, the read buffer
- * is reallocated to the new size, loaded from file, and the
- * deserialize routine is called a second time on the
- * new buffer. The entry returned by the first call to
+ * is reallocated to the new size, loaded from file, and the
+ * deserialize routine is called a second time on the
+ * new buffer. The entry returned by the first call to
* the deserialize routine is discarded (via the free_icr
* call) after the new size is retrieved (via the image_len
- * call). Note that the new size is used as the size of the
+ * call). Note that the new size is used as the size of the
* entry in the cache.
*
- * If the new size is smaller than the old, no new loads
+ * If the new size is smaller than the old, no new loads
* or deserializes are performed, but the new size becomes
* the size of the entry in the cache.
*
- * When this flag is set, an attempt to read past the
- * end of file could occur. In this case, if the size
- * returned get_load_size callback would result in a
- * read past the end of file, the size is truncated to
+ * When this flag is set, an attempt to read past the
+ * end of file could occur. In this case, if the size
+ *		returned by the get_load_size callback would result in a
+ * read past the end of file, the size is truncated to
* avoid this, and processing proceeds as normal.
*
* The following flags may only appear in test code.
*
 * H5C__CLASS_SKIP_READS: This flag is intended only for use in test
* code. When it is set, reads on load will be skipped,
- * and an uninitialize buffer will be passed to the
+ *		and an uninitialized buffer will be passed to the
* deserialize function.
*
 * H5C__CLASS_SKIP_WRITES: This flag is intended only for use in test
- * code. When it is set, writes of buffers prepared by the
+ * code. When it is set, writes of buffers prepared by the
* serialize callback will be skipped.
*
* GET_INITIAL_LOAD_SIZE: Pointer to the 'get initial load size' function.
*
- * This function determines the size based on the information in the
+ * This function determines the size based on the information in the
* parameter "udata" or an initial speculative guess. The size is
* returned in the parameter "image_len_ptr".
*
* For an entry with H5C__CLASS_NO_FLAGS_SET:
* This function returns in "image_len_ptr" the on disk size of the
* entry.
- *
+ *
* For an entry with H5C__CLASS_SPECULATIVE_LOAD_FLAG:
* This function returns in "image_len_ptr" an initial guess of the
* entry's on disk size. This many bytes will be loaded from
@@ -359,7 +359,7 @@ typedef struct H5C_t H5C_t;
* This value is used by the cache to determine the size of
* the disk image for the metadata, in order to read the disk
* image from the file.
- *
+ *
* Processing in the get_load_size function should proceed as follows:
*
* If successful, the function will place the length in the *image_len_ptr
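
For concreteness, a minimal client "get initial load size" callback might look
like the sketch below. The my_-prefixed names and the fixed size are
illustrative assumptions, not part of this header; a speculative-load client
would return its best initial guess instead of a known size.

    #define MY_ENTRY_SIZE ((size_t)512)     /* illustrative on-disk size */

    static herr_t
    my_get_initial_load_size(void H5_ATTR_UNUSED *udata, size_t *image_len_ptr)
    {
        /* For a fixed-size entry, simply report the known on-disk size.
         * With H5C__CLASS_SPECULATIVE_LOAD_FLAG set, the cache would
         * re-read if the true size turns out to be larger.
         */
        *image_len_ptr = MY_ENTRY_SIZE;

        return SUCCEED;
    }
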
@@ -420,17 +420,17 @@ typedef struct H5C_t H5C_t;
* the same as the checksum stored in the metadata.
*
* It computes the checksum based on the metadata stored in the
- * parameter "image_ptr" and the actual length of the metadata in the
+ * parameter "image_ptr" and the actual length of the metadata in the
* parameter "len" which is obtained from the "get_load_size" callback.
*
* The typedef for the verify_chksum callback is as follows:
*
- * typedef htri_t (*H5C_verify_chksum_func_t)(const void *image_ptr,
- * size_t len,
+ * typedef htri_t (*H5C_verify_chksum_func_t)(const void *image_ptr,
+ * size_t len,
* void *udata_ptr);
*
* The parameters of the verify_chksum callback are as follows:
- *
+ *
* image_ptr: Pointer to a buffer containing the metadata read in.
*
* len: The actual length of the metadata.
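
A minimal verify_chksum callback is sketched below, assuming the common layout
in which a 4-byte checksum trails the metadata; the my_ names and the image
layout are illustrative assumptions, while H5_checksum_metadata() is the
library's metadata checksum routine. (A real client would typically use
UINT32DECODE to handle endianness when reading the stored checksum.)

    static htri_t
    my_verify_chksum(const void *image_ptr, size_t len, void H5_ATTR_UNUSED *udata)
    {
        const uint8_t *image = (const uint8_t *)image_ptr;
        uint32_t stored_chksum;     /* checksum recorded in the image      */
        uint32_t computed_chksum;   /* checksum recomputed over the image  */

        HDassert(len > sizeof(uint32_t));

        /* assumed layout: the checksum occupies the last 4 bytes */
        HDmemcpy(&stored_chksum, image + (len - sizeof(uint32_t)), sizeof(uint32_t));

        /* checksum everything that precedes the stored checksum */
        computed_chksum = H5_checksum_metadata(image, len - sizeof(uint32_t), 0);

        return (stored_chksum == computed_chksum) ? TRUE : FALSE;
    }
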
@@ -539,23 +539,23 @@ typedef struct H5C_t H5C_t;
* responsible for serializing the data structure, not moving it on disk
* or resizing it.
*
- * In addition, the client may use the pre-serialize callback to
- * ensure that the entry is ready to be flushed -- in particular,
- * if the entry contains references to other entries that are in
+ * In addition, the client may use the pre-serialize callback to
+ * ensure that the entry is ready to be flushed -- in particular,
+ * if the entry contains references to other entries that are in
* temporary file space, the pre-serialize callback must move those
- * entries into real file space so that the serialzed entry will
+ *	entries into real file space so that the serialized entry will
* contain no invalid data.
*
* One would think that the base address and length of
- * the length of the entry's image on disk would be well known.
+ *	the entry's image on disk would be well known.
* However, that need not be the case as free space section info
- * entries will change size (and possibly location) depending on the
- * number of blocks of free space being manages, and fractal heap
- * direct blocks can change compressed size (and possibly location)
+ * entries will change size (and possibly location) depending on the
+ *	number of blocks of free space being managed, and fractal heap
+ * direct blocks can change compressed size (and possibly location)
* on serialization if compression is enabled. Similarly, it may
* be necessary to move entries from temporary to real file space.
*
- * The pre-serialize callback must report any such changes to the
+ * The pre-serialize callback must report any such changes to the
* cache, which must then update its internal structures as needed.
*
* The typedef for the pre-serialize callback is as follows:
@@ -575,7 +575,7 @@ typedef struct H5C_t H5C_t;
* target entry.
*
* thing: Pointer to void containing the address of the in core
- * representation of the target metadata cache entry.
+ * representation of the target metadata cache entry.
* This is the same pointer returned by a protect of the
* addr and len given above.
*
@@ -587,8 +587,8 @@ typedef struct H5C_t H5C_t;
* production mode.
*
* len: Length in bytes of the in file image of the entry to be
- * serialized. Also the size the image passed to the
- * serialize callback (discussed below) unless that
+ *		serialized.  Also the size of the image passed to the
+ * serialize callback (discussed below) unless that
* value is altered by this function.
*
* This parameter is supplied mainly for sanity checking.
@@ -599,10 +599,10 @@ typedef struct H5C_t H5C_t;
* new_addr_ptr: Pointer to haddr_t. If the entry is moved by
 *		the pre-serialize function, the new on disk base address must
* be stored in *new_addr_ptr, and the appropriate flag set
- * in *flags_ptr.
+ * in *flags_ptr.
*
- * If the entry is not moved by the serialize function,
- * *new_addr_ptr is undefined on pre-serialize callback
+ *		If the entry is not moved by the pre-serialize function,
+ * *new_addr_ptr is undefined on pre-serialize callback
* return.
*
* new_len_ptr: Pointer to size_t. If the entry is resized by the
@@ -610,8 +610,8 @@ typedef struct H5C_t H5C_t;
* must be stored in *new_len_ptr, and the appropriate flag set
* in *flags_ptr.
*
- * If the entry is not resized by the pre-serialize function,
- * *new_len_ptr is undefined on pre-serialize callback
+ * If the entry is not resized by the pre-serialize function,
+ * *new_len_ptr is undefined on pre-serialize callback
* return.
*
* flags_ptr: Pointer to an unsigned integer used to return flags
@@ -625,27 +625,27 @@ typedef struct H5C_t H5C_t;
* must be stored in *new_len_ptr.
*
* If the H5C__SERIALIZE_MOVED_FLAG flag is set, the
- * new image base address must be stored in *new_addr_ptr.
+ * new image base address must be stored in *new_addr_ptr.
*
* Processing in the pre-serialize function should proceed as follows:
*
* The pre-serialize function must examine the in core representation
* indicated by the thing parameter, if the pre-serialize function does
* not need to change the size or location of the on-disk image, it must
- * set *flags_ptr to zero.
+ * set *flags_ptr to zero.
*
* If the size of the on-disk image must be changed, the pre-serialize
* function must load the length of the new image into *new_len_ptr, and
- * set the H5C__SERIALIZE_RESIZED_FLAG in *flags_ptr.
+ * set the H5C__SERIALIZE_RESIZED_FLAG in *flags_ptr.
*
* If the base address of the on disk image must be changed, the
* pre-serialize function must set *new_addr_ptr to the new base address,
* and set the H5C__SERIALIZE_MOVED_FLAG in *flags_ptr.
*
- * In addition, the pre-serialize callback may perform any other
+ * In addition, the pre-serialize callback may perform any other
* processing required before the entry is written to disk
*
- * If it is successful, the function must return SUCCEED.
+ * If it is successful, the function must return SUCCEED.
*
* If it fails for any reason, the function must return FAIL and
* push error information on the error stack with the error API
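
Putting the above together, a sketch of a pre-serialize callback for an entry
whose image can grow follows; my_entry_t and the size arithmetic are
illustrative assumptions, while the signature matches the declarations used
elsewhere in this changeset (e.g. H5C__prefetched_entry_pre_serialize).

    typedef struct my_entry_t {     /* illustrative in-core representation */
        size_t nrecs;               /* number of records held by the entry */
    } my_entry_t;

    #define MY_HDR_SIZE ((size_t)8)     /* illustrative encoded sizes */
    #define MY_REC_SIZE ((size_t)16)

    static herr_t
    my_pre_serialize(H5F_t H5_ATTR_UNUSED *f, void *thing,
        haddr_t H5_ATTR_UNUSED addr, size_t len,
        haddr_t H5_ATTR_UNUSED *new_addr_ptr, size_t *new_len_ptr,
        unsigned *flags_ptr)
    {
        my_entry_t *entry = (my_entry_t *)thing;
        size_t needed = MY_HDR_SIZE + (entry->nrecs * MY_REC_SIZE);

        if(needed != len) {
            /* report the new image size; the cache resizes the image */
            *new_len_ptr = needed;
            *flags_ptr = H5C__SERIALIZE_RESIZED_FLAG;
        } /* end if */
        else
            *flags_ptr = 0;     /* no size or location change */

        return SUCCEED;
    }
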
@@ -660,11 +660,11 @@ typedef struct H5C_t H5C_t;
*
* At this point, the base address and length of the entry's image on
* disk must be well known and not change during the serialization
- * process.
+ * process.
*
- * While any size and/or location changes must have been handled
- * by a pre-serialize call, the client may elect to handle any other
- * changes to the entry required to place it in correct form for
+ * While any size and/or location changes must have been handled
+ * by a pre-serialize call, the client may elect to handle any other
+ * changes to the entry required to place it in correct form for
* writing to disk in this call.
*
* The typedef for the serialize callback is as follows:
@@ -698,20 +698,20 @@ typedef struct H5C_t H5C_t;
* production mode.
*
* thing: Pointer to void containing the address of the in core
- * representation of the target metadata cache entry.
+ * representation of the target metadata cache entry.
* This is the same pointer returned by a protect of the
* addr and len given above.
*
* Processing in the serialize function should proceed as follows:
*
- * If there are any remaining changes to the entry required before
+ * If there are any remaining changes to the entry required before
* write to disk, they must be dealt with first.
*
- * The serialize function must then examine the in core
- * representation indicated by the thing parameter, and write a
- * serialized image of its contents into the provided buffer.
+ * The serialize function must then examine the in core
+ * representation indicated by the thing parameter, and write a
+ * serialized image of its contents into the provided buffer.
*
- * If it is successful, the function must return SUCCEED.
+ * If it is successful, the function must return SUCCEED.
*
* If it fails for any reason, the function must return FAIL and
* push error information on the error stack with the error API
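
Continuing the sketch above, a matching serialize callback simply encodes the
in-core representation into the cache-supplied buffer; the signature matches
the serialize declarations in this changeset, and the field layout is again an
illustrative assumption.

    static herr_t
    my_serialize(const H5F_t H5_ATTR_UNUSED *f, void *image_ptr, size_t len,
        void *thing)
    {
        my_entry_t *entry = (my_entry_t *)thing;    /* from the sketch above */
        uint8_t *image = (uint8_t *)image_ptr;
        uint32_t nrecs = (uint32_t)entry->nrecs;

        /* the buffer must be exactly the size reported earlier */
        HDassert(len == MY_HDR_SIZE + (entry->nrecs * MY_REC_SIZE));

        /* encode the (illustrative) header */
        HDmemcpy(image, "MYEN", 4);                 /* signature (assumed) */
        HDmemcpy(image + 4, &nrecs, sizeof(nrecs)); /* record count        */

        /* ... encode the nrecs records at image + MY_HDR_SIZE ... */

        return SUCCEED;
    }
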
@@ -737,25 +737,25 @@ typedef struct H5C_t H5C_t;
* thing: Pointer to void containing the address of the in core
* representation of the target metadata cache entry. This
* is the same pointer that would be returned by a protect
- * of the addr and len of the entry.
+ * of the addr and len of the entry.
*
* Processing in the notify function should proceed as follows:
*
* The notify function may perform any action it would like, including
* metadata cache calls.
*
- * If the function is successful, it must return SUCCEED.
+ * If the function is successful, it must return SUCCEED.
*
* If it fails for any reason, the function must return FAIL and
* push error information on the error stack with the error API
- * routines.
+ * routines.
*
*
* FREE_ICR: Pointer to the free ICR callback.
*
* The free ICR callback is invoked by the metadata cache when it
* wishes to evict an entry, and needs the client to free the memory
- * allocated for the in core representation.
+ * allocated for the in core representation.
*
* The typedef for the free ICR callback is as follows:
*
@@ -766,18 +766,18 @@ typedef struct H5C_t H5C_t;
* thing: Pointer to void containing the address of the in core
* representation of the target metadata cache entry. This
* is the same pointer that would be returned by a protect
- * of the addr and len of the entry.
+ * of the addr and len of the entry.
*
* Processing in the free ICR function should proceed as follows:
*
* The free ICR function must free all memory allocated to the
- * in core representation.
+ * in core representation.
*
- * If the function is successful, it must return SUCCEED.
+ * If the function is successful, it must return SUCCEED.
*
* If it fails for any reason, the function must return FAIL and
* push error information on the error stack with the error API
- * routines.
+ * routines.
*
* At least when compiled with debug, it would be useful if the
* free ICR call would fail if the in core representation has been
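
A matching free ICR callback for the illustrative my_entry_t above is short;
whether HDfree() or a free list macro is appropriate depends on how the client
allocated the structure.

    static herr_t
    my_free_icr(void *thing)
    {
        my_entry_t *entry = (my_entry_t *)thing;    /* from the sketch above */

        HDassert(entry);

        /* free any owned sub-allocations first, then the struct itself */
        HDfree(entry);

        return SUCCEED;
    }
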
@@ -787,30 +787,30 @@ typedef struct H5C_t H5C_t;
*
* In principle, there is no need for the get file space free size
* callback. However, as an optimization, it is sometimes convenient
- * to allocate and free file space for a number of cache entries
+ * to allocate and free file space for a number of cache entries
* simultaneously in a single contiguous block of file space.
*
* File space allocation is done by the client, so the metadata cache
* need not be involved. However, since the metadata cache typically
- * handles file space release when an entry is destroyed, some
+ * handles file space release when an entry is destroyed, some
* adjustment on the part of the metadata cache is required for this
* operation.
*
- * The get file space free size callback exists to support this
+ * The get file space free size callback exists to support this
* operation.
*
- * If a group of cache entries that were allocated as a group are to
+ * If a group of cache entries that were allocated as a group are to
* be discarded and their file space released, the type of the first
- * (i.e. lowest address) entry in the group must implement the
- * get free file space size callback.
+ * (i.e. lowest address) entry in the group must implement the
+ * get free file space size callback.
*
- * To free the file space of all entries in the group in a single
- * operation, first expunge all entries other than the first without
- * the free file space flag.
+ * To free the file space of all entries in the group in a single
+ * operation, first expunge all entries other than the first without
+ * the free file space flag.
*
* Then, to complete the operation, unprotect or expunge the first
- * entry in the block with the free file space flag set. Since
- * the get free file space callback is implemented, the metadata
+ * entry in the block with the free file space flag set. Since
+ * the get free file space callback is implemented, the metadata
* cache will use this callback to get the size of the block to be
* freed, instead of using the size of the entry as is done otherwise.
*
@@ -830,12 +830,12 @@ typedef struct H5C_t H5C_t;
* call of the associated addr and len.
*
* fs_size_ptr: Pointer to hsize_t in which the callback will return
- * the size of the piece of file space to be freed. Note
- * that the space to be freed is presumed to have the same
+ * the size of the piece of file space to be freed. Note
+ * that the space to be freed is presumed to have the same
* base address as the cache entry.
*
* The function simply returns the size of the block of file space
- * to be freed in *fsf_size_ptr.
+ * to be freed in *fsf_size_ptr.
*
* If the function is successful, it must return SUCCEED.
*
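
The group-discard protocol described above might read as follows in client
code; the addresses, the entry class, and the three-entry group are
illustrative, while H5C_expunge_entry() and the expunge flags are declared in
this header.

    /* expunge the trailing entries without freeing their file space */
    if(H5C_expunge_entry(f, MY_ENTRY_CLASS, addr2, H5C__NO_FLAGS_SET) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't expunge entry")
    if(H5C_expunge_entry(f, MY_ENTRY_CLASS, addr1, H5C__NO_FLAGS_SET) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't expunge entry")

    /* expunge the first (lowest address) entry with the free file space
     * flag set -- its fsf_size callback supplies the size of the whole block
     */
    if(H5C_expunge_entry(f, MY_ENTRY_CLASS, addr0, H5C__FREE_FILE_SPACE_FLAG) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't expunge entry")
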
@@ -847,16 +847,16 @@ typedef struct H5C_t H5C_t;
/* Actions that can be reported to 'notify' client callback */
typedef enum H5C_notify_action_t {
- H5C_NOTIFY_ACTION_AFTER_INSERT, /* Entry has been added to the cache
+ H5C_NOTIFY_ACTION_AFTER_INSERT, /* Entry has been added to the cache
* via the insert call
*/
- H5C_NOTIFY_ACTION_AFTER_LOAD, /* Entry has been loaded into the
+ H5C_NOTIFY_ACTION_AFTER_LOAD, /* Entry has been loaded into the
					 * cache from file via the protect call
*/
H5C_NOTIFY_ACTION_AFTER_FLUSH, /* Entry has just been flushed to
* file.
*/
- H5C_NOTIFY_ACTION_BEFORE_EVICT, /* Entry is about to be evicted
+ H5C_NOTIFY_ACTION_BEFORE_EVICT, /* Entry is about to be evicted
* from cache.
*/
H5C_NOTIFY_ACTION_ENTRY_DIRTIED, /* Entry has been marked dirty. */
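
A client notify callback typically handles the one or two actions it cares
about and ignores the rest, as in this sketch; the dirty_notices counter is an
illustrative addition to the my_entry_t sketched earlier.

    static herr_t
    my_notify(H5C_notify_action_t action, void *thing)
    {
        my_entry_t *entry = (my_entry_t *)thing;

        switch(action) {
            case H5C_NOTIFY_ACTION_ENTRY_DIRTIED:
                entry->dirty_notices++;     /* illustrative bookkeeping */
                break;

            default:
                /* all other actions are ignored by this client */
                break;
        } /* end switch */

        return SUCCEED;
    }
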
@@ -911,45 +911,45 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t *cache_ptr, haddr_t addr,
*
* H5C_ring_t & associated #defines
*
- * The metadata cache uses the concept of rings to order the flushes of
- * classes of entries. In this arrangement, each entry in the cache is
- * assigned to a ring, and on flush, the members of the outermost ring
+ * The metadata cache uses the concept of rings to order the flushes of
+ * classes of entries. In this arrangement, each entry in the cache is
+ * assigned to a ring, and on flush, the members of the outermost ring
* are flushed first, followed by the next outermost, and so on with the
- * members of the innermost ring being flushed last.
+ * members of the innermost ring being flushed last.
*
- * Note that flush dependencies are used to order flushes within rings.
+ * Note that flush dependencies are used to order flushes within rings.
*
 * Note also that at the conceptual level, rings are arguably superfluous,
- * as a similar effect could be obtained via the flush dependency mechanism.
- * However, this would require all entries in the cache to participate in a
- * flush dependency -- with the implied setup and takedown overhead and
- * added complexity. Further, the flush ordering between rings need only
- * be enforced on flush operations, and thus the use of flush dependencies
- * instead would apply unnecessary constraints on flushes under normal
+ * as a similar effect could be obtained via the flush dependency mechanism.
+ * However, this would require all entries in the cache to participate in a
+ * flush dependency -- with the implied setup and takedown overhead and
+ * added complexity. Further, the flush ordering between rings need only
+ * be enforced on flush operations, and thus the use of flush dependencies
+ * instead would apply unnecessary constraints on flushes under normal
* operating circumstances.
*
- * As of this writing, all metadata entries pretaining to data sets and
- * groups must be flushed first, and are thus assigned to the outermost
- * ring.
+ * As of this writing, all metadata entries pertaining to data sets and
+ * groups must be flushed first, and are thus assigned to the outermost
+ * ring.
*
* Free space managers managing file space must be flushed next,
* and are assigned to the second and third outermost rings. Two rings
* are used here as the raw data free space manager must be flushed before
* the metadata free space manager.
*
- * The object header and associated chunks used to implement superblock
- * extension messages must be flushed next, and are thus assigned to
+ * The object header and associated chunks used to implement superblock
+ * extension messages must be flushed next, and are thus assigned to
* the fourth outermost ring.
*
- * The superblock proper must be flushed last, and is thus assigned to
+ * The superblock proper must be flushed last, and is thus assigned to
* the innermost ring.
*
* The H5C_ring_t and the associated #defines below are used to define
- * the rings. Each entry must be assigned to the appropriate ring on
+ * the rings. Each entry must be assigned to the appropriate ring on
* insertion or protect.
*
- * Note that H5C_ring_t was originally an enumerated type. It was
- * converted to an integer and a set of #defines for convenience in
+ * Note that H5C_ring_t was originally an enumerated type. It was
+ * converted to an integer and a set of #defines for convenience in
* debugging.
*/
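
For reference, the ordering described above is realized by a small set of
integer ring values, outermost (flushed first) to innermost (flushed last);
the names below follow the library's conventions, and the exact numeric values
are incidental -- only their relative order matters.

    #define H5C_RING_UNDEFINED  0   /* shouldn't appear on a valid entry      */
    #define H5C_RING_USER       1   /* outermost: dataset and group metadata  */
    #define H5C_RING_RDFSM      2   /* raw data free space manager metadata   */
    #define H5C_RING_MDFSM      3   /* metadata free space manager metadata   */
    #define H5C_RING_SBE        4   /* superblock extension and its chunks    */
    #define H5C_RING_SB         5   /* innermost: the superblock proper       */
    #define H5C_RING_NTYPES     6
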
@@ -1015,8 +1015,8 @@ typedef int H5C_ring_t;
* dynamically allocated block of size bytes in which the
* on disk image of the metadata cache entry is stored.
*
- * If the entry is dirty, the pre-serialize and serialize
- * callbacks must be used to update this image before it is
+ * If the entry is dirty, the pre-serialize and serialize
+ * callbacks must be used to update this image before it is
* written to disk
*
* image_up_to_date: Boolean flag that is set to TRUE when *image_ptr
@@ -1038,14 +1038,14 @@ typedef int H5C_ring_t;
* dirtied while protected.
*
* This field is set to FALSE in the protect call, and may
- * be set to TRUE by the H5C_mark_entry_dirty() call at any
+ * be set to TRUE by the H5C_mark_entry_dirty() call at any
* time prior to the unprotect call.
*
- * The H5C_mark_entry_dirty() call exists as a convenience
- * function for the fractal heap code which may not know if
- * an entry is protected or pinned, but knows that is either
- * protected or pinned. The dirtied field was added as in
- * the parallel case, it is necessary to know whether a
+ * The H5C_mark_entry_dirty() call exists as a convenience
+ * function for the fractal heap code which may not know if
+ *		an entry is protected or pinned, but knows that it is either
+ *		protected or pinned.  The dirtied field was added because, in
+ *		the parallel case, it is necessary to know whether a
* protected entry is dirty prior to the protect call.
*
* is_protected: Boolean flag indicating whether this entry is protected
@@ -1090,9 +1090,9 @@ typedef int H5C_ring_t;
* policy code (LRU at present).
*
* 2) A pinned entry can be accessed or modified at any time.
- * This places an extra burden on the pre-serialize and
- * serialize callbacks, which must ensure that a pinned
- * entry is consistent and ready to write to disk before
+ * This places an extra burden on the pre-serialize and
+ * serialize callbacks, which must ensure that a pinned
+ * entry is consistent and ready to write to disk before
* generating an image.
*
* 3) A pinned entry can be marked as dirty (and possibly
@@ -1121,9 +1121,9 @@ typedef int H5C_ring_t;
* flushed from the cache until all other entries without
* the flush_me_last flag set have been flushed.
*
- * Note:
- *
- * At this time, the flush_me_last
+ * Note:
+ *
+ * At this time, the flush_me_last
* flag will only be applied to one entry, the superblock,
* and the code utilizing these flags is protected with HDasserts
* to enforce this. This restraint can certainly be relaxed in
@@ -1149,11 +1149,11 @@ typedef int H5C_ring_t;
* the unprotect, the entry's is_dirty flag is reset by flushing
* it with the H5C__FLUSH_CLEAR_ONLY_FLAG.
*
- * flush_immediately: Boolean flag used only in Phdf5 -- and then only
+ * flush_immediately: Boolean flag used only in Phdf5 -- and then only
* for H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED.
*
- * When a distributed metadata write is triggered at a
- * sync point, this field is used to mark entries that
+ * When a distributed metadata write is triggered at a
+ * sync point, this field is used to mark entries that
* must be flushed before leaving the sync point. At all
* other times, this field should be set to FALSE.
*
@@ -1167,10 +1167,10 @@ typedef int H5C_ring_t;
*
* Fields supporting rings for flush ordering:
*
- * All entries in the metadata cache are assigned to a ring. On cache
+ * All entries in the metadata cache are assigned to a ring. On cache
* flush, all entries in the outermost ring are flushed first, followed
- * by all members of the next outermost ring, and so on until the
- * innermost ring is flushed. Note that this ordering is ONLY applied
+ * by all members of the next outermost ring, and so on until the
+ * innermost ring is flushed. Note that this ordering is ONLY applied
* in flush and serialize calls. Rings are ignored during normal operations
* in which entries are flushed as directed by the replacement policy.
*
@@ -1211,7 +1211,7 @@ typedef int H5C_ring_t;
* this field is nonzero, then this entry cannot be flushed.
*
* flush_dep_nunser_children: Number of flush dependency children
- * that are either unserialized, or have a non-zero number of
+ *		that are either unserialized, or have a non-zero number of
 *		unserialized children.
*
* Note that since there is no requirement that a clean entry
@@ -1219,7 +1219,7 @@ typedef int H5C_ring_t;
* to be greater than flush_dep_ndirty_children.
*
 *		This field exists to facilitate correct ordering of entry
- * serializations when it is necessary to serialize all the
+ * serializations when it is necessary to serialize all the
* entries in the metadata cache. Thus in the cache
* serialization, no entry can be serialized unless this
* field contains 0.
@@ -1232,7 +1232,7 @@ typedef int H5C_ring_t;
*
* Addendum: JRM -- 10/14/15
*
- * We have come to scan all entries in the cache frequently enough that
+ * We have come to scan all entries in the cache frequently enough that
* the cost of doing so by scanning the hash table has become unacceptable.
* To reduce this cost, the index now also maintains a doubly linked list
* of all entries in the index. This list is known as the index list.
@@ -1251,12 +1251,12 @@ typedef int H5C_ring_t;
*
* il_next: Next pointer used by the index to maintain a doubly linked
* list of all entries in the index (and thus in the cache).
- * This field contains a pointer to the next entry in the
+ * This field contains a pointer to the next entry in the
* index list, or NULL if there is no next entry.
*
* il_prev: Prev pointer used by the index to maintain a doubly linked
* list of all entries in the index (and thus in the cache).
- * This field contains a pointer to the previous entry in the
+ * This field contains a pointer to the previous entry in the
* index list, or NULL if there is no previous entry.
*
*
@@ -1294,71 +1294,71 @@ typedef int H5C_ring_t;
* The use of the replacement policy fields under the Modified LRU policy
* is discussed below:
*
- * next: Next pointer in either the LRU, the protected list, or
- * the pinned list depending on the current values of
- * is_protected and is_pinned. If there is no next entry
+ * next: Next pointer in either the LRU, the protected list, or
+ * the pinned list depending on the current values of
+ * is_protected and is_pinned. If there is no next entry
* on the list, this field should be set to NULL.
*
* prev: Prev pointer in either the LRU, the protected list,
- * or the pinned list depending on the current values of
- * is_protected and is_pinned. If there is no previous
+ * or the pinned list depending on the current values of
+ * is_protected and is_pinned. If there is no previous
* entry on the list, this field should be set to NULL.
*
* aux_next: Next pointer on either the clean or dirty LRU lists.
- * This entry should be NULL when either is_protected or
- * is_pinned is true.
+ * This entry should be NULL when either is_protected or
+ * is_pinned is true.
*
- * When is_protected and is_pinned are false, and is_dirty is
- * true, it should point to the next item on the dirty LRU
- * list.
+ * When is_protected and is_pinned are false, and is_dirty is
+ * true, it should point to the next item on the dirty LRU
+ * list.
*
- * When is_protected and is_pinned are false, and is_dirty is
- * false, it should point to the next item on the clean LRU
- * list. In either case, when there is no next item, it
+ * When is_protected and is_pinned are false, and is_dirty is
+ * false, it should point to the next item on the clean LRU
+ * list. In either case, when there is no next item, it
* should be NULL.
*
* aux_prev: Previous pointer on either the clean or dirty LRU lists.
- * This entry should be NULL when either is_protected or
- * is_pinned is true.
- *
- * When is_protected and is_pinned are false, and is_dirty is
- * true, it should point to the previous item on the dirty
- * LRU list.
- *
- * When is_protected and is_pinned are false, and is_dirty
- * is false, it should point to the previous item on the
- * clean LRU list.
- *
- * In either case, when there is no previous item, it should
+ * This entry should be NULL when either is_protected or
+ * is_pinned is true.
+ *
+ * When is_protected and is_pinned are false, and is_dirty is
+ * true, it should point to the previous item on the dirty
+ * LRU list.
+ *
+ * When is_protected and is_pinned are false, and is_dirty
+ * is false, it should point to the previous item on the
+ * clean LRU list.
+ *
+ * In either case, when there is no previous item, it should
* be NULL.
*
* Fields supporting the cache image feature:
*
- * The following fields are used to store data about the entry which must
- * be stored in the cache image block, but which will typically be either
- * lost or heavily altered in the process of serializing the cache and
+ * The following fields are used to store data about the entry which must
+ * be stored in the cache image block, but which will typically be either
+ * lost or heavily altered in the process of serializing the cache and
* preparing its contents to be copied into the cache image block.
*
* Some fields are also used in loading the contents of the metadata cache
- * image back into the cache, and in managing such entries until they are
- * either protected by the library (at which point they become regular
- * entries) or are evicted. See discussion of the prefetched field for
+ * image back into the cache, and in managing such entries until they are
+ * either protected by the library (at which point they become regular
+ * entries) or are evicted. See discussion of the prefetched field for
* further details.
*
* include_in_image: Boolean flag indicating whether this entry should
* be included in the metadata cache image. This field should
* always be false prior to the H5C_prep_for_file_close() call.
* During that call, it should be set to TRUE for all entries
- * that are to be included in the metadata cache image. At
- * present, only the superblock, the superblock extension
- * object header and its chunks (if any) are omitted from
+ * that are to be included in the metadata cache image. At
+ * present, only the superblock, the superblock extension
+ * object header and its chunks (if any) are omitted from
* the image.
*
* lru_rank: Rank of the entry in the LRU just prior to file close.
*
* Note that the first entry on the LRU has lru_rank 1,
- * and that entries not on the LRU at that time will have
- * either lru_rank -1 (if pinned) or 0 (if loaded during
+ * and that entries not on the LRU at that time will have
+ * either lru_rank -1 (if pinned) or 0 (if loaded during
 *		the process of flushing the cache).
*
* image_dirty: Boolean flag indicating whether the entry should be marked
@@ -1366,125 +1366,125 @@ typedef int H5C_ring_t;
* TRUE iff the entry is dirty when H5C_prep_for_file_close()
* is called.
*
- * fd_parent_count: If the entry is a child in one or more flush dependency
- * relationships, this field contains the number of flush
+ * fd_parent_count: If the entry is a child in one or more flush dependency
+ * relationships, this field contains the number of flush
* dependency parents.
*
* In all other cases, the field is set to zero.
*
- * Note that while this count is initially taken from the
- * flush dependency fields above, if the entry is in the
+ * Note that while this count is initially taken from the
+ * flush dependency fields above, if the entry is in the
* cache image (i.e. include_in_image is TRUE), any parents
* that are not in the image are removed from this count and
* from the fd_parent_addrs array below.
*
- * Finally observe that if the entry is dirty and in the
+ * Finally observe that if the entry is dirty and in the
* cache image, and its parent is dirty and not in the cache
* image, then the entry must be removed from the cache image
* to avoid violating the flush dependency flush ordering.
*
- * fd_parent_addrs: If the entry is a child in one or more flush dependency
- * relationship when H5C_prep_for_file_close() is called, this
- * field must contain a pointer to an array of size
- * fd_parent_count containing the on disk addresses of the
+ * fd_parent_addrs: If the entry is a child in one or more flush dependency
+ * relationship when H5C_prep_for_file_close() is called, this
+ * field must contain a pointer to an array of size
+ * fd_parent_count containing the on disk addresses of the
* parent.
*
* In all other cases, the field is set to NULL.
*
- * Note that while this list of addresses is initially taken
- * from the flush dependency fields above, if the entry is in the
+ * Note that while this list of addresses is initially taken
+ * from the flush dependency fields above, if the entry is in the
* cache image (i.e. include_in_image is TRUE), any parents
- * that are not in the image are removed from this list, and
+ *		that are not in the image are removed from this list,
* and from the fd_parent_count above.
*
- * Finally observe that if the entry is dirty and in the
+ * Finally observe that if the entry is dirty and in the
* cache image, and its parent is dirty and not in the cache
* image, then the entry must be removed from the cache image
* to avoid violating the flush dependency flush ordering.
*
- * fd_child_count: If the entry is a parent in a flush dependency
- * relationship, this field contains the number of flush
+ * fd_child_count: If the entry is a parent in a flush dependency
+ * relationship, this field contains the number of flush
* dependency children.
*
* In all other cases, the field is set to zero.
*
- * Note that while this count is initially taken from the
- * flush dependency fields above, if the entry is in the
+ * Note that while this count is initially taken from the
+ * flush dependency fields above, if the entry is in the
* cache image (i.e. include_in_image is TRUE), any children
* that are not in the image are removed from this count.
*
- * fd_dirty_child_count: If the entry is a parent in a flush dependency
- * relationship, this field contains the number of dirty flush
+ * fd_dirty_child_count: If the entry is a parent in a flush dependency
+ * relationship, this field contains the number of dirty flush
* dependency children.
*
* In all other cases, the field is set to zero.
*
- * Note that while this count is initially taken from the
- * flush dependency fields above, if the entry is in the
- * cache image (i.e. include_in_image is TRUE), any dirty
- * children that are not in the image are removed from this
+ * Note that while this count is initially taken from the
+ * flush dependency fields above, if the entry is in the
+ * cache image (i.e. include_in_image is TRUE), any dirty
+ * children that are not in the image are removed from this
* count.
*
* image_fd_height: Flush dependency height of the entry in the cache image.
*
- * The flush dependency height of any entry involved in a
- * flush dependency relationship is defined to be the
+ * The flush dependency height of any entry involved in a
+ * flush dependency relationship is defined to be the
* longest flush dependency path from that entry to an entry
- * with no flush dependency children.
+ * with no flush dependency children.
*
- * Since the image_fd_height is used to order entries in the
- * cache image so that fd parents preceed fd children, for
+ * Since the image_fd_height is used to order entries in the
+ *		cache image so that fd parents precede fd children, for
 *		purposes of this field, an entry is at flush dependency
* level 0 if it either has no children, or if all of its
- * children are not in the cache image.
+ * children are not in the cache image.
*
- * Note that if a child in a flush dependency relationship is
+ * Note that if a child in a flush dependency relationship is
* dirty and in the cache image, and its parent is dirty and
- * not in the cache image, then the child must be excluded
+ * not in the cache image, then the child must be excluded
* from the cache image to maintain flush ordering.
*
* prefetched: Boolean flag indicating that the on disk image of the entry
- * has been loaded into the cache prior any request for the
+ *		has been loaded into the cache prior to any request for the
* entry by the rest of the library.
*
- * As of this writing (8/10/15), this can only happen through
- * the load of a cache image block, although other scenarios
- * are contemplated for the use of this feature. Note that
- * unlike the usual prefetch situation, this means that a
- * prefetched entry can be dirty, and/or can be a party to
- * flush dependency relationship(s). This complicates matters
+ * As of this writing (8/10/15), this can only happen through
+ * the load of a cache image block, although other scenarios
+ * are contemplated for the use of this feature. Note that
+ * unlike the usual prefetch situation, this means that a
+ * prefetched entry can be dirty, and/or can be a party to
+ * flush dependency relationship(s). This complicates matters
* somewhat.
*
- * The essential feature of a prefetched entry is that it
- * consists only of a buffer containing the on disk image of
- * the entry. Thus it must be deserialized before it can
- * be passed back to the library on a protect call. This
+ * The essential feature of a prefetched entry is that it
+ * consists only of a buffer containing the on disk image of
+ * the entry. Thus it must be deserialized before it can
+ * be passed back to the library on a protect call. This
* task is handled by H5C_deserialized_prefetched_entry().
- * In essence, this routine calls the deserialize callback
- * provided in the protect call with the on disk image,
+ * In essence, this routine calls the deserialize callback
+ * provided in the protect call with the on disk image,
* deletes the prefetched entry from the cache, and replaces
* it with the deserialized entry returned by the deserialize
* callback.
*
- * Further, if the prefetched entry is a flush dependency parent,
- * all its flush dependency children (which must also be
- * prefetched entries), must be transferred to the new cache
+ * Further, if the prefetched entry is a flush dependency parent,
+ * all its flush dependency children (which must also be
+ * prefetched entries), must be transferred to the new cache
* entry returned by the deserialization callback.
*
- * Finally, if the prefetched entry is a flush dependency child,
- * this flush dependency must be destroyed prior to the
+ * Finally, if the prefetched entry is a flush dependency child,
+ * this flush dependency must be destroyed prior to the
* deserialize call.
*
- * In addition to the above special processing on the first
+ * In addition to the above special processing on the first
 *		protect call on a prefetched entry (after which it is no longer
- * a prefetched entry), prefetched entries also require special
+ * a prefetched entry), prefetched entries also require special
 *		treatment on flush and evict.
*
- * On flush, a dirty prefetched entry must simply be written
- * to disk and marked clean without any call to any client
+ * On flush, a dirty prefetched entry must simply be written
+ * to disk and marked clean without any call to any client
* callback.
*
- * On eviction, if a prefetched entry is a flush dependency
+ * On eviction, if a prefetched entry is a flush dependency
* child, that flush dependency relationship must be destroyed
* just prior to the eviction. If the flush dependency code
* is working properly, it should be impossible for any entry
@@ -1496,35 +1496,35 @@ typedef int H5C_ring_t;
*
 *		The value of this field is undefined if prefetched is FALSE.
*
- * age: Number of times a prefetched entry has appeared in
- * subsequent cache images. The field exists to allow
- * imposition of a limit on how many times a prefetched
+ * age: Number of times a prefetched entry has appeared in
+ * subsequent cache images. The field exists to allow
+ * imposition of a limit on how many times a prefetched
* entry can appear in subsequent cache images without being
* converted to a regular entry.
*
- * This field must be zero if prefetched is FALSE.
+ * This field must be zero if prefetched is FALSE.
*
* prefetched_dirty: Boolean field that must be set to FALSE unless the
* following conditions hold:
*
* 1) The file has been opened R/O.
*
- * 2) The entry is either a prefetched entry, or was
+ * 2) The entry is either a prefetched entry, or was
* re-constructed from a prefetched entry.
*
* 3) The base prefetched entry was marked dirty.
*
- * This field exists to solve the following problem with
+ * This field exists to solve the following problem with
* files containing cache images that are opened R/O.
*
* If the cache image contains a dirty entry, that entry
* must be marked clean when it is inserted into the cache
- * in the read-only case, as otherwise the metadata cache
- * will attempt to flush it on file close -- which is poor
+ * in the read-only case, as otherwise the metadata cache
+ * will attempt to flush it on file close -- which is poor
* form in the read-only case.
*
- * However, since the entry is marked clean, it is possible
- * that the metadata cache will evict it if the size of the
+ * However, since the entry is marked clean, it is possible
+ * that the metadata cache will evict it if the size of the
* metadata in the file exceeds the size of the metadata cache,
* and the application visits much of this data.
*
@@ -1534,24 +1534,24 @@ typedef int H5C_ring_t;
 *		the entry has ever been written to its assigned location in
* the file.
*
- * With this background, the purpose of this field should be
- * obvious -- when set, it allows the eviction candidate
- * selection code to skip over the entry, thus avoiding the
+ * With this background, the purpose of this field should be
+ * obvious -- when set, it allows the eviction candidate
+ * selection code to skip over the entry, thus avoiding the
* issue.
*
- * Since the issue only arises in the R/O case, there is
- * no possible interaction with SWMR. There are also
+ * Since the issue only arises in the R/O case, there is
+ * no possible interaction with SWMR. There are also
* potential interactions with Evict On Close -- at present,
* we deal with this by disabling EOC in the R/O case.
*
- * serialization_count: Integer field used to maintain a count of the
- * number of times each entry is serialized during cache
+ * serialization_count: Integer field used to maintain a count of the
+ * number of times each entry is serialized during cache
* serialization. While no entry should be serialized more than
- * once in any serialization call, throw an assertion if any
- * flush depencency parent is serialized more than once during
+ * once in any serialization call, throw an assertion if any
+ *		flush dependency parent is serialized more than once during
* a single cache serialization.
*
- * This is a debugging field, and thus is maintained only if
+ * This is a debugging field, and thus is maintained only if
* NDEBUG is undefined.
*
* Fields supporting tagged entries:
@@ -1688,7 +1688,7 @@ typedef struct H5C_cache_entry_t {
* structure H5C_image_entry_t
*
* Instances of the H5C_image_entry_t structure are used to store data on
- * metadata cache entries used in the construction of the metadata cache
+ * metadata cache entries used in the construction of the metadata cache
* image block. In essence this structure is a greatly simplified version
* of H5C_cache_entry_t.
*
@@ -1705,17 +1705,17 @@ typedef struct H5C_cache_entry_t {
*
* size: Length of the cache entry on disk in bytes.
*
- * ring: Instance of H5C_ring_t indicating the flush ordering ring
+ * ring: Instance of H5C_ring_t indicating the flush ordering ring
* to which this entry is assigned.
*
- * age: Number of times this prefetech entry has appeared in
- * the current sequence of cache images. This field is
+ * age:	Number of times this prefetched entry has appeared in
+ * the current sequence of cache images. This field is
* initialized to 0 if the instance of H5C_image_entry_t
- * is constructed from a regular entry.
+ * is constructed from a regular entry.
*
- * If the instance is constructed from a prefetched entry
+ * If the instance is constructed from a prefetched entry
* currently residing in the metadata cache, the field is
- * set to 1 + the age of the prefetched entry, or to
+ * set to 1 + the age of the prefetched entry, or to
* H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX if that sum exceeds
* H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX.
*
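
In other words, the age carried into the next image follows a simple clamped
increment, sketched here with illustrative variable names:

    /* illustrative: derive the image entry's age from a prefetched entry */
    new_age = prev_age + 1;
    if(new_age > H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX)
        new_age = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX;
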
@@ -1724,8 +1724,8 @@ typedef struct H5C_cache_entry_t {
* lru_rank: Rank of the entry in the LRU just prior to file close.
*
* Note that the first entry on the LRU has lru_rank 1,
- * and that entries not on the LRU at that time will have
- * either lru_rank -1 (if pinned) or 0 (if loaded during
+ * and that entries not on the LRU at that time will have
+ * either lru_rank -1 (if pinned) or 0 (if loaded during
 *		the process of flushing the cache).
*
* is_dirty: Boolean flag indicating whether the contents of the cache
@@ -1757,9 +1757,9 @@ typedef struct H5C_cache_entry_t {
* In all other cases, the field is set to zero.
*
* Note that while this count is initially taken from the
- * flush dependency fields in the associated instance of
- * H5C_cache_entry_t, if the entry is in the cache image
- * (i.e. include_in_image is TRUE), any parents that are
+ * flush dependency fields in the associated instance of
+ * H5C_cache_entry_t, if the entry is in the cache image
+ * (i.e. include_in_image is TRUE), any parents that are
* not in the image are removed from this count and
* from the fd_parent_addrs array below.
*
@@ -1767,7 +1767,7 @@ typedef struct H5C_cache_entry_t {
* cache image, and its parent is dirty and not in the cache
* image, then the entry must be removed from the cache image
* to avoid violating the flush dependency flush ordering.
- * This should have happened before the construction of
+ * This should have happened before the construction of
* the instance of H5C_image_entry_t.
*
* fd_parent_addrs: If the entry is a child in one or more flush dependency
@@ -1780,27 +1780,27 @@ typedef struct H5C_cache_entry_t {
*
* Note that while this list of addresses is initially taken
* from the flush dependency fields in the associated instance of
- * H5C_cache_entry_t, if the entry is in the cache image
- * (i.e. include_in_image is TRUE), any parents that are not
- * in the image are removed from this list, and from the
+ * H5C_cache_entry_t, if the entry is in the cache image
+ * (i.e. include_in_image is TRUE), any parents that are not
+ * in the image are removed from this list, and from the
* fd_parent_count above.
*
* Finally observe that if the entry is dirty and in the
* cache image, and its parent is dirty and not in the cache
* image, then the entry must be removed from the cache image
* to avoid violating the flush dependency flush ordering.
- * This should have happened before the construction of
+ * This should have happened before the construction of
* the instance of H5C_image_entry_t.
*
- * fd_child_count: If the entry is a parent in a flush dependency
- * relationship, this field contains the number of flush
+ * fd_child_count: If the entry is a parent in a flush dependency
+ * relationship, this field contains the number of flush
* dependency children.
*
* In all other cases, the field is set to zero.
*
* Note that while this count is initially taken from the
* flush dependency fields in the associated instance of
- * H5C_cache_entry_t, if the entry is in the cache image
+ * H5C_cache_entry_t, if the entry is in the cache image
* (i.e. include_in_image is TRUE), any children
* that are not in the image are removed from this count.
*
@@ -1812,16 +1812,16 @@ typedef struct H5C_cache_entry_t {
*
* Note that while this count is initially taken from the
* flush dependency fields in the associated instance of
- * H5C_cache_entry_t, if the entry is in the cache image
- * (i.e. include_in_image is TRUE), any dirty children
+ * H5C_cache_entry_t, if the entry is in the cache image
+ * (i.e. include_in_image is TRUE), any dirty children
* that are not in the image are removed from this count.
*
* image_ptr: Pointer to void. When not NULL, this field points to a
* dynamically allocated block of size bytes in which the
* on disk image of the metadata cache entry is stored.
*
- * If the entry is dirty, the pre-serialize and serialize
- * callbacks must be used to update this image before it is
+ * If the entry is dirty, the pre-serialize and serialize
+ * callbacks must be used to update this image before it is
* written to disk
*
*
@@ -2141,7 +2141,7 @@ typedef struct H5C_auto_size_ctl_t {
* fields for generation of a metadata cache image on file close.
*
* At present control of construction of a cache image is via a FAPL
- * property at file open / create.
+ * property at file open / create.
*
* The fields of the structure are discussed individually below:
*
@@ -2150,43 +2150,43 @@ typedef struct H5C_auto_size_ctl_t {
* H5C_image_ctl_t passed to the cache must have a known
* version number, or an error will be flagged.
*
- * generate_image: Boolean flag indicating whether a cache image should
+ * generate_image: Boolean flag indicating whether a cache image should
* be created on file close.
*
- * save_resize_status: Boolean flag indicating whether the cache image
+ * save_resize_status: Boolean flag indicating whether the cache image
* should include the adaptive cache resize configuration and status.
* Note that this field is ignored at present.
*
* entry_ageout: Integer field indicating the maximum number of
* times a prefetched entry can appear in subsequent cache images.
- * This field exists to allow the user to avoid the buildup of
+ * This field exists to allow the user to avoid the buildup of
* infrequently used entries in long sequences of cache images.
*
* The value of this field must lie in the range
- * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE (-1) to
+ * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE (-1) to
* H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX (100).
*
- * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE means that no limit
+ * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE means that no limit
 *	is imposed on the number of times a prefetched entry can appear
* in subsequent cache images.
*
- * A value of 0 prevents prefetched entries from being included
+ * A value of 0 prevents prefetched entries from being included
* in cache images.
*
* Positive integers restrict prefetched entries to the specified
* number of appearances.
- *
+ *
* Note that the number of subsequent cache images that a prefetched
* entry has appeared in is tracked in an 8 bit field. Thus, while
- * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX can be increased from its
- * current value, any value in excess of 255 will be the functional
+ * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX can be increased from its
+ * current value, any value in excess of 255 will be the functional
* equivalent of H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE.
*
* flags: Unsigned integer containing flags controlling which aspects of the
- * cache image functinality is actually executed. The primary impetus
- * behind this field is to allow development of tests for partial
- * implementations that will require little if any modification to run
- * with the full implementation. In normal operation, all flags should
+ *	cache image functionality is actually executed.  The primary impetus
+ * behind this field is to allow development of tests for partial
+ * implementations that will require little if any modification to run
+ * with the full implementation. In normal operation, all flags should
* be set.
*
****************************************************************************/
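
At the user level, the FAPL property mentioned above is set through the public
cache image API; a sketch follows (error checking omitted; see
H5AC_cache_image_config_t in H5ACpublic.h for the field definitions).

    hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
    H5AC_cache_image_config_t image_config;

    image_config.version            = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
    image_config.generate_image     = TRUE;     /* write an image on close */
    image_config.save_resize_status = FALSE;    /* ignored at present      */
    image_config.entry_ageout       = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;

    H5Pset_mdc_image_config(fapl_id, &image_config);
    /* ... create or open the file with fapl_id ... */
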
@@ -2196,7 +2196,7 @@ typedef struct H5C_auto_size_ctl_t {
#define H5C_CI__SUPRESS_ENTRY_WRITES ((unsigned)0x0004)
#define H5C_CI__WRITE_CACHE_IMAGE ((unsigned)0x0008)
-/* This #define must set all defined H5C_CI flags. It is
+/* This #define must set all defined H5C_CI flags. It is
* used in the default value for instances of H5C_cache_image_ctl_t.
* This value will only be modified in test code.
*/
@@ -2242,7 +2242,7 @@ H5_DLL herr_t H5C_evict(H5F_t *f);
H5_DLL herr_t H5C_expunge_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr,
unsigned flags);
H5_DLL herr_t H5C_flush_cache(H5F_t *f, unsigned flags);
-H5_DLL herr_t H5C_flush_tagged_entries(H5F_t *f, haddr_t tag);
+H5_DLL herr_t H5C_flush_tagged_entries(H5F_t *f, haddr_t tag);
H5_DLL herr_t H5C_force_cache_image_load(H5F_t * f);
H5_DLL herr_t H5C_evict_tagged_entries(H5F_t *f, haddr_t tag, hbool_t match_global);
H5_DLL herr_t H5C_expunge_tag_type_metadata(H5F_t *f, haddr_t tag, int type_id, unsigned flags);
@@ -2269,7 +2269,7 @@ H5_DLL void * H5C_get_aux_ptr(const H5C_t *cache_ptr);
H5_DLL herr_t H5C_image_stats(H5C_t * cache_ptr, hbool_t print_header);
H5_DLL herr_t H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr,
void *thing, unsigned int flags);
-H5_DLL herr_t H5C_load_cache_image_on_next_protect(H5F_t *f, haddr_t addr,
+H5_DLL herr_t H5C_load_cache_image_on_next_protect(H5F_t *f, haddr_t addr,
hsize_t len, hbool_t rw);
H5_DLL herr_t H5C_mark_entry_dirty(void *thing);
H5_DLL herr_t H5C_mark_entry_clean(void *thing);
@@ -2308,7 +2308,7 @@ H5_DLL herr_t H5C_get_entry_ring(const H5F_t *f, haddr_t addr, H5C_ring_t *ring)
H5_DLL herr_t H5C_unsettle_entry_ring(void *thing);
H5_DLL herr_t H5C_unsettle_ring(H5F_t * f, H5C_ring_t ring);
H5_DLL herr_t H5C_remove_entry(void *thing);
-H5_DLL herr_t H5C_cache_image_status(H5F_t * f, hbool_t *load_ci_ptr,
+H5_DLL herr_t H5C_cache_image_status(H5F_t * f, hbool_t *load_ci_ptr,
hbool_t *write_ci_ptr);
H5_DLL hbool_t H5C_cache_image_pending(const H5C_t *cache_ptr);
H5_DLL herr_t H5C_get_mdc_image_info(H5C_t *cache_ptr, haddr_t *image_addr, hsize_t *image_len);
diff --git a/src/H5Cquery.c b/src/H5Cquery.c
index 68258a6..48378c0 100644
--- a/src/H5Cquery.c
+++ b/src/H5Cquery.c
@@ -398,7 +398,7 @@ done:
* Function: H5C_get_mdc_image_info
*
* Purpose: To retrieve the address and size of the cache image in the file.
- *
+ *
* Return: SUCCEED on success, and FAIL on failure.
*
* Programmer: Vailin Choi; March 2017
@@ -419,7 +419,7 @@ H5C_get_mdc_image_info(H5C_t * cache_ptr, haddr_t *image_addr, hsize_t *image_le
*image_addr = cache_ptr->image_addr;
*image_len = cache_ptr->image_len;
-
+
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_get_mdc_image_info() */
diff --git a/src/H5Ctag.c b/src/H5Ctag.c
index d1ede0f..756d0be 100644
--- a/src/H5Ctag.c
+++ b/src/H5Ctag.c
@@ -56,16 +56,16 @@
/* Typedef for tagged entry iterator callback context - evict tagged entries */
typedef struct {
H5F_t *f; /* File pointer for evicting entry */
- hbool_t evicted_entries_last_pass; /* Flag to indicate that an entry
- * was evicted when iterating over
- * cache
+ hbool_t evicted_entries_last_pass; /* Flag to indicate that an entry
+ * was evicted when iterating over
+ * cache
*/
- hbool_t pinned_entries_need_evicted;/* Flag to indicate that a pinned
- * entry was attempted to be evicted
+ hbool_t pinned_entries_need_evicted;/* Flag to indicate that a pinned
+ * entry was attempted to be evicted
*/
- hbool_t skipped_pf_dirty_entries; /* Flag indicating that one or more
+ hbool_t skipped_pf_dirty_entries; /* Flag indicating that one or more
* entries marked prefetched_dirty
- * were encountered and not
+ * were encountered and not
* evicted.
*/
} H5C_tag_iter_evict_ctx_t;
@@ -113,10 +113,10 @@ H5FL_EXTERN(H5C_tag_info_t);
* Function: H5C_ignore_tags
*
* Purpose: Override all assertion frameworks associated with making
- * sure proper tags are applied to cache entries.
+ * sure proper tags are applied to cache entries.
*
- * NOTE: This should really only be used in tests that need
- * to access internal functions without going through
+ * NOTE: This should really only be used in tests that need
+ * to access internal functions without going through
* standard API paths. Since tags are set inside dxpl_id's
* before coming into the cache, any external functions that
* use the internal library functions (i.e., tests) should
@@ -204,8 +204,8 @@ H5C_get_num_objs_corked(const H5C_t *cache_ptr)
* Function: H5C__tag_entry
*
* Purpose: Tags an entry with the provided tag (contained in the API context).
- * If sanity checking is enabled, this function will perform
- * validation that a proper tag is contained within the provided
+ * If sanity checking is enabled, this function will perform
+ * validation that a proper tag is contained within the provided
* data access property list id before application.
*
* Return: FAIL if error is detected, SUCCEED otherwise.
@@ -234,10 +234,10 @@ H5C__tag_entry(H5C_t *cache, H5C_cache_entry_t *entry)
if(cache->ignore_tags) {
/* if we're ignoring tags, it's because we're running
- tests on internal functions and may not have inserted a tag
+ tests on internal functions and may not have inserted a tag
value into a given API context before creating some metadata. Thus,
in this case only, if a tag value has not been set, we can
- arbitrarily set it to something for the sake of passing the tests.
+ arbitrarily set it to something for the sake of passing the tests.
If the tag value is set, then we'll just let it get assigned without
additional checking for correctness. */
if(!H5F_addr_defined(tag))
@@ -399,7 +399,7 @@ H5C__iter_tagged_entries_real(H5C_t *cache, haddr_t tag, H5C_tag_iter_cb_t cb,
/* Make callback for entry */
if((cb)(entry, cb_ctx) != H5_ITER_CONT)
HGOTO_ERROR(H5E_CACHE, H5E_BADITER, FAIL, "tagged entry iteration callback failed")
-
+
/* Advance to next entry */
entry = next_entry;
} /* end while */
@@ -441,11 +441,11 @@ H5C__iter_tagged_entries(H5C_t *cache, haddr_t tag, hbool_t match_global,
HGOTO_ERROR(H5E_CACHE, H5E_BADITER, FAIL, "iteration of tagged entries failed")
/* Check for iterating over global metadata */
- if(match_global) {
+ if(match_global) {
/* Iterate over the entries for SOHM entries */
if(H5C__iter_tagged_entries_real(cache, H5AC__SOHM_TAG, cb, cb_ctx) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_BADITER, FAIL, "iteration of tagged entries failed")
-
+
/* Iterate over the entries for global heap entries */
if(H5C__iter_tagged_entries_real(cache, H5AC__GLOBALHEAP_TAG, cb, cb_ctx) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_BADITER, FAIL, "iteration of tagged entries failed")
@@ -553,29 +553,29 @@ H5C_evict_tagged_entries(H5F_t * f, haddr_t tag, hbool_t match_global)
/* Keep doing this until we have stopped evicted entries */
} while(TRUE == ctx.evicted_entries_last_pass);
- /* In most cases, fail if we have finished evicting entries and pinned
- * entries still need evicted
+ /* In most cases, fail if we have finished evicting entries and pinned
+ * entries still need to be evicted
*
- * However, things can get strange if the file was opened R/O and
- * the file contains a cache image and the cache image contains dirty
- * entries.
+ * However, things can get strange if the file was opened R/O and
+ * the file contains a cache image and the cache image contains dirty
+ * entries.
*
- * Since the file was opened read only, dirty entries in the cache
+ * Since the file was opened read only, dirty entries in the cache
* image were marked as clean when they were inserted into the metadata
* cache. This is necessary, as if they are marked dirty, the metadata
- * cache will attempt to write them on file close, which is frowned
+ * cache will attempt to write them on file close, which is frowned
* upon when the file is opened R/O.
*
- * On the other hand, such entries (marked prefetched_dirty) must not
+ * On the other hand, such entries (marked prefetched_dirty) must not
* be evicted, as should the cache be asked to re-load them, the cache
* will attempt to read them from the file, and at best load an outdated
* version.
- *
- * To avoid this, H5C__evict_tagged_entries_cb has been modified to
- * skip such entries. However, by doing so, it may prevent pinned
+ *
+ * To avoid this, H5C__evict_tagged_entries_cb has been modified to
+ * skip such entries. However, by doing so, it may prevent pinned
* entries from becoming unpinned.
*
- * Thus we must ignore ctx.pinned_entries_need_evicted if
+ * Thus we must ignore ctx.pinned_entries_need_evicted if
* ctx.skipped_pf_dirty_entries is TRUE.
*/
if((!ctx.skipped_pf_dirty_entries) && (ctx.pinned_entries_need_evicted))
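Condensed, the eviction logic described above is a fixed-point loop followed by this check; a sketch stitched together from the fragments in this hunk (error strings illustrative):

    do {
        ctx.pinned_entries_need_evicted = FALSE;
        ctx.evicted_entries_last_pass = FALSE;
        if(H5C__iter_tagged_entries(cache, tag, match_global,
                H5C__evict_tagged_entries_cb, &ctx) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_BADITER, FAIL, "iteration of tagged entries failed")
    } while(TRUE == ctx.evicted_entries_last_pass);

    /* Tolerate stranded pinned entries only when prefetched-dirty
     * entries were deliberately skipped */
    if((!ctx.skipped_pf_dirty_entries) && (ctx.pinned_entries_need_evicted))
        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "pinned entries can't be evicted")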
@@ -631,7 +631,7 @@ H5C__mark_tagged_entries_cb(H5C_cache_entry_t *entry, void H5_ATTR_UNUSED *_ctx)
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5C__mark_tagged_entries(H5C_t *cache, haddr_t tag)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -698,16 +698,16 @@ H5C_verify_tag(int id, haddr_t tag)
if(tag == H5AC__SUPERBLOCK_TAG)
HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "H5AC__SUPERBLOCK_TAG applied to non-superblock entry")
} /* end else */
-
+
/* Free Space Manager */
if(tag == H5AC__FREESPACE_TAG && ((id != H5AC_FSPACE_HDR_ID) && (id != H5AC_FSPACE_SINFO_ID)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "H5AC__FREESPACE_TAG applied to non-freespace entry")
-
+
/* SOHM */
if((id == H5AC_SOHM_TABLE_ID) || (id == H5AC_SOHM_LIST_ID))
if(tag != H5AC__SOHM_TAG)
HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "sohm entry not tagged with H5AC__SOHM_TAG")
-
+
/* Global Heap */
if(id == H5AC_GHEAP_ID) {
if(tag != H5AC__GLOBALHEAP_TAG)
@@ -783,7 +783,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5C_retag_entries(H5C_t *cache, haddr_t src_tag, haddr_t dest_tag)
+H5C_retag_entries(H5C_t *cache, haddr_t src_tag, haddr_t dest_tag)
{
H5C_tag_info_t *tag_info; /* Points to a tag info struct */
herr_t ret_value = SUCCEED; /* Return value */
@@ -860,7 +860,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-herr_t
+herr_t
H5C_expunge_tag_type_metadata(H5F_t *f, haddr_t tag, int type_id, unsigned flags)
{
H5C_t *cache; /* Pointer to cache structure */
@@ -904,7 +904,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-herr_t
+herr_t
H5C_get_tag(const void *thing, haddr_t *tag /*OUT*/)
{
const H5C_cache_entry_t *entry = (const H5C_cache_entry_t *)thing; /* Pointer to cache entry */
diff --git a/src/H5D.c b/src/H5D.c
index f49a8a3..7030759 100644
--- a/src/H5D.c
+++ b/src/H5D.c
@@ -338,7 +338,7 @@ H5Dclose(hid_t dset_id)
/*
* Decrement the counter on the dataset. It will be freed if the count
- * reaches zero.
+ * reaches zero.
*/
if(H5I_dec_app_ref_always_close(dset_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTDEC, FAIL, "can't decrement count on dataset ID")
@@ -936,7 +936,7 @@ H5Dflush(hid_t dset_id)
FUNC_ENTER_API(FAIL)
H5TRACE1("e", "i", dset_id);
-
+
/* Check args */
if(NULL == (dset = (H5D_t *)H5I_object_verify(dset_id, H5I_DATASET)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset")
@@ -971,7 +971,7 @@ H5Drefresh(hid_t dset_id)
{
H5D_t *dset; /* Dataset to refresh */
herr_t ret_value = SUCCEED; /* return value */
-
+
FUNC_ENTER_API(FAIL)
H5TRACE1("e", "i", dset_id);
@@ -995,9 +995,9 @@ done:
/*-------------------------------------------------------------------------
* Function: H5Dformat_convert (Internal)
*
- * Purpose: For chunked:
+ * Purpose: For chunked:
 * Convert the chunk indexing type to a version 1 B-tree if it is not one already
- * For compact/contiguous:
+ * For compact/contiguous:
* Downgrade layout version to 3 if greater than 3
* For virtual: no conversion
*
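H5Dformat_convert() takes a single open dataset; a minimal usage sketch (file and dataset names illustrative):

    hid_t dset_id = H5Dopen2(file_id, "/my_dataset", H5P_DEFAULT);

    /* Downgrade the chunk index / layout version in place so that
     * older library versions can read the dataset */
    if(H5Dformat_convert(dset_id) < 0)
        /* handle error */;
    H5Dclose(dset_id);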
@@ -1013,7 +1013,7 @@ H5Dformat_convert(hid_t dset_id)
{
H5D_t *dset; /* Dataset to refresh */
herr_t ret_value = SUCCEED; /* return value */
-
+
FUNC_ENTER_API(FAIL)
H5TRACE1("e", "i", dset_id);
@@ -1049,7 +1049,7 @@ H5Dformat_convert(hid_t dset_id)
case H5D_NLAYOUTS:
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid dataset layout type")
- default:
+ default:
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "unknown dataset layout type")
} /* end switch */
@@ -1075,7 +1075,7 @@ H5Dget_chunk_index_type(hid_t did, H5D_chunk_index_t *idx_type)
{
H5D_t *dset; /* Dataset to refresh */
herr_t ret_value = SUCCEED; /* return value */
-
+
FUNC_ENTER_API(FAIL)
H5TRACE2("e", "i*Dk", did, idx_type);
@@ -1186,7 +1186,7 @@ H5Dget_num_chunks(hid_t dset_id, hid_t fspace_id, hsize_t *nchunks)
done:
FUNC_LEAVE_API(ret_value);
} /* H5Dget_num_chunks() */
-
+
/*-------------------------------------------------------------------------
* Function: H5Dget_chunk_info
diff --git a/src/H5Dbtree2.c b/src/H5Dbtree2.c
index e2a5a68..23d58ad 100644
--- a/src/H5Dbtree2.c
+++ b/src/H5Dbtree2.c
@@ -11,7 +11,7 @@
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-/*
+/*
*
* Purpose: v2 B-tree indexing for chunked datasets with > 1 unlimited dimensions.
* Each dataset chunk in the b-tree is identified by its dimensional offset.
@@ -105,7 +105,7 @@ static int H5D__bt2_idx_iterate_cb(const void *_record, void *_udata);
/* Callback for H5B2_find() which is called in H5D__bt2_idx_get_addr() */
static herr_t H5D__bt2_found_cb(const void *nrecord, void *op_data);
-/*
+/*
* Callback for H5B2_remove() and H5B2_delete() which is called
* in H5D__bt2_idx_remove() and H5D__bt2_idx_delete().
*/
@@ -251,7 +251,7 @@ H5D__bt2_crt_context(void *_udata)
H5MM_memcpy(my_dim, udata->dim, H5O_LAYOUT_NDIMS * sizeof(uint32_t));
ctx->dim = my_dim;
- /*
+ /*
* Compute the size required for encoding the size of a chunk,
* allowing for an extra byte, in case the filter makes the chunk larger.
*/
@@ -291,7 +291,7 @@ H5D__bt2_dst_context(void *_ctx)
/* Free array for chunk dimension sizes */
if(ctx->dim)
- (void)H5FL_BLK_FREE(chunk_dim, ctx->dim);
+ (void)H5FL_BLK_FREE(chunk_dim, ctx->dim);
/* Release callback context */
ctx = H5FL_FREE(H5D_bt2_ctx_t, ctx);
@@ -568,7 +568,7 @@ H5D__bt2_filt_debug(FILE *stream, int indent, int fwidth,
const H5D_chunk_rec_t *record = (const H5D_chunk_rec_t *)_record; /* The native record */
const H5D_bt2_ctx_t *ctx = (const H5D_bt2_ctx_t *)_ctx; /* Callback context */
unsigned u; /* Local index variable */
-
+
FUNC_ENTER_STATIC_NOERR
/* Sanity checks */
@@ -734,9 +734,9 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5D__bt2_idx_create
+ * Function: H5D__bt2_idx_create
*
- * Purpose: Create the v2 B-tree for tracking dataset chunks
+ * Purpose: Create the v2 B-tree for tracking dataset chunks
*
* Return: SUCCEED/FAIL
*
@@ -768,7 +768,7 @@ H5D__bt2_idx_create(const H5D_chk_idx_info_t *idx_info)
if(idx_info->pline->nused > 0) {
unsigned chunk_size_len; /* Size of encoded chunk size */
- /*
+ /*
* Compute the size required for encoding the size of a chunk,
* allowing for an extra byte, in case the filter makes the chunk larger.
*/
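The computation referred to here is one byte of headroom plus enough bytes to hold log2 of the chunk size, capped at eight; a sketch along the lines of the surrounding code, where chunk_size is the unfiltered chunk size in bytes:

    /* Bytes needed to encode a chunk size, with one extra byte of
     * headroom in case a filter enlarges the chunk */
    unsigned chunk_size_len = 1 + ((H5VM_log2_gen((uint64_t)chunk_size) + 8) / 8);
    if(chunk_size_len > 8)
        chunk_size_len = 8;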
@@ -836,7 +836,7 @@ H5D__bt2_idx_is_space_alloc(const H5O_storage_chunk_t *storage)
* Function: H5D__bt2_mod_cb
*
* Purpose: Modify record for dataset chunk when it is found in a v2 B-tree.
- * This is the callback for H5B2_modify() which is called in
+ * This is the callback for H5B2_modify() which is called in
* H5D__bt2_idx_insert().
*
* Return: Success: non-negative
@@ -878,7 +878,7 @@ H5D__bt2_mod_cb(void *_record, void *_op_data, hbool_t *changed)
* Function: H5D__bt2_idx_insert
*
* Purpose: Insert chunk address into the indexing structure.
- * A non-filtered chunk:
+ * A non-filtered chunk:
* Should not exist
* Allocate the chunk and pass chunk address back up
* A filtered chunk:
@@ -953,7 +953,7 @@ done:
* Function: H5D__bt2_found_cb
*
* Purpose: Retrieve record for dataset chunk when it is found in a v2 B-tree.
- * This is the callback for H5B2_find() which is called in
+ * This is the callback for H5B2_find() which is called in
* H5D__bt2_idx_get_addr() and H5D__bt2_idx_insert().
*
* Return: Success: non-negative
@@ -1072,7 +1072,7 @@ done:
* Purpose: Translate the B-tree specific chunk record into a generic
* form and make the callback to the generic chunk callback
* routine.
- * This is the callback for H5B2_iterate() which is called in
+ * This is the callback for H5B2_iterate() which is called in
* H5D__bt2_idx_iterate().
*
* Return: Success: Non-negative
@@ -1162,7 +1162,7 @@ done:
*
* Purpose: Free space for 'dataset chunk' object as v2 B-tree
* is being deleted or v2 B-tree node is removed.
- * This is the callback for H5B2_remove() and H5B2_delete() which
+ * This is the callback for H5B2_remove() and H5B2_delete(),
* which are called in H5D__bt2_idx_remove() and H5D__bt2_idx_delete().
*
* Return: Success: non-negative
diff --git a/src/H5Dcontig.c b/src/H5Dcontig.c
index adf6719..08ab149 100644
--- a/src/H5Dcontig.c
+++ b/src/H5Dcontig.c
@@ -847,7 +847,7 @@ H5D__contig_readvv_sieve_cb(hsize_t dst_off, hsize_t src_off, size_t len,
* the end of the data element, and don't read more than
* the buffer size.
*/
- min = MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size);
+ min = MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size);
H5_CHECKED_ASSIGN(dset_contig->sieve_size, size_t, min, hsize_t);
/* Update local copies of sieve information */
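MIN3 here is the plain three-way minimum (the real macros live in H5private.h); the sieve size is thus clipped to the distance to the relative EOA, the remaining data, and the sieve buffer capacity. A sketch:

    #define MIN(a, b)     (((a) < (b)) ? (a) : (b))
    #define MIN3(a, b, c) MIN((a), MIN((b), (c)))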
@@ -1047,7 +1047,7 @@ H5D__contig_writevv_sieve_cb(hsize_t dst_off, hsize_t src_off, size_t len,
max_data = store_contig->dset_size - dst_off;
/* Compute the size of the sieve buffer */
- min = MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size);
+ min = MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size);
H5_CHECKED_ASSIGN(dset_contig->sieve_size, size_t, min, hsize_t);
/* Check if there is any point in reading the data from the file */
@@ -1168,7 +1168,7 @@ H5D__contig_writevv_sieve_cb(hsize_t dst_off, hsize_t src_off, size_t len,
* the end of the data element, and don't read more than
* the buffer size.
*/
- min = MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size);
+ min = MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size);
H5_CHECKED_ASSIGN(dset_contig->sieve_size, size_t, min, hsize_t);
/* Update local copies of sieve information */
diff --git a/src/H5Dfill.c b/src/H5Dfill.c
index 00c85d4..0cd8803 100644
--- a/src/H5Dfill.c
+++ b/src/H5Dfill.c
@@ -565,7 +565,7 @@ done:
*/
herr_t
H5D__fill_refill_vl(H5D_fill_buf_info_t *fb_info, size_t nelmts)
-{
+{
herr_t ret_value = SUCCEED; /* Return value */
void * buf = NULL; /* Temporary fill buffer */
diff --git a/src/H5Dio.c b/src/H5Dio.c
index a03f780..895f0bb 100644
--- a/src/H5Dio.c
+++ b/src/H5Dio.c
@@ -343,7 +343,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5Dwrite_chunk
*
- * Purpose: Writes an entire chunk to the file directly.
+ * Purpose: Writes an entire chunk to the file directly.
*
* Return: Non-negative on success/Negative on failure
*
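A minimal sketch of a direct chunk write (buffer size and offsets illustrative); the data must already be filtered exactly as the pipeline expects, since this path bypasses datatype conversion and filtering:

    hsize_t offset[2] = {0, 0};           /* logical chunk origin */
    unsigned char chunk_buf[4096];        /* pre-filtered chunk bytes */

    /* Filter mask 0 means "all pipeline filters were applied" */
    if(H5Dwrite_chunk(dset_id, H5P_DEFAULT, 0, offset,
            sizeof(chunk_buf), chunk_buf) < 0)
        /* handle error */;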
@@ -353,14 +353,14 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5Dwrite_chunk(hid_t dset_id, hid_t dxpl_id, uint32_t filters, const hsize_t *offset,
+H5Dwrite_chunk(hid_t dset_id, hid_t dxpl_id, uint32_t filters, const hsize_t *offset,
size_t data_size, const void *buf)
{
H5D_t *dset = NULL;
hsize_t offset_copy[H5O_LAYOUT_NDIMS]; /* Internal copy of chunk offset */
uint32_t data_size_32; /* Chunk data size (limited to 32-bits currently) */
herr_t ret_value = SUCCEED; /* Return value */
-
+
FUNC_ENTER_API(FAIL)
H5TRACE6("e", "iiIu*hz*x", dset_id, dxpl_id, filters, offset, data_size, buf);
@@ -506,16 +506,16 @@ H5D__read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "memory dataspace does not have extent set")
/* H5S_select_shape_same() has been modified to accept topologically identical
- * selections with different rank as having the same shape (if the most
- * rapidly changing coordinates match up), but the I/O code still has
+ * selections with different rank as having the same shape (if the most
+ * rapidly changing coordinates match up), but the I/O code still has
* difficulties with the notion.
*
- * To solve this, we check to see if H5S_select_shape_same() returns true,
- * and if the ranks of the mem and file spaces are different. If the are,
- * construct a new mem space that is equivalent to the old mem space, and
+ * To solve this, we check to see if H5S_select_shape_same() returns true,
+ * and if the ranks of the mem and file spaces are different. If they are,
+ * construct a new mem space that is equivalent to the old mem space, and
* use that instead.
*
- * Note that in general, this requires us to touch up the memory buffer as
+ * Note that in general, this requires us to touch up the memory buffer as
* well.
*/
if(TRUE == H5S_select_shape_same(mem_space, file_space) &&
@@ -740,17 +740,17 @@ H5D__write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
if(!(H5S_has_extent(mem_space)))
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "memory dataspace does not have extent set")
- /* H5S_select_shape_same() has been modified to accept topologically
- * identical selections with different rank as having the same shape
- * (if the most rapidly changing coordinates match up), but the I/O
+ /* H5S_select_shape_same() has been modified to accept topologically
+ * identical selections with different rank as having the same shape
+ * (if the most rapidly changing coordinates match up), but the I/O
* code still has difficulties with the notion.
*
- * To solve this, we check to see if H5S_select_shape_same() returns
- * true, and if the ranks of the mem and file spaces are different.
- * If the are, construct a new mem space that is equivalent to the
+ * To solve this, we check to see if H5S_select_shape_same() returns
+ * true, and if the ranks of the mem and file spaces are different.
+ * If they are, construct a new mem space that is equivalent to the
* old mem space, and use that instead.
*
- * Note that in general, this requires us to touch up the memory buffer
+ * Note that in general, this requires us to touch up the memory buffer
* as well.
*/
if(TRUE == H5S_select_shape_same(mem_space, file_space) &&
diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index 0327c28..0f1f70d 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -363,13 +363,13 @@ H5D__mpio_opt_possible(const H5D_io_info_t *io_info, const H5S_t *file_space,
/* Check to see if the process is reading the entire dataset */
if(H5S_GET_SELECT_TYPE(file_space) != H5S_SEL_ALL)
- local_cause[1] |= H5D_MPIO_RANK0_NOT_H5S_ALL;
+ local_cause[1] |= H5D_MPIO_RANK0_NOT_H5S_ALL;
 /* Only perform this optimization for contiguous datasets, currently */
else if(H5D_CONTIGUOUS != io_info->dset->shared->layout.type)
- /* Flag to do a MPI_Bcast of the data from one proc instead of
+ /* Flag to do an MPI_Bcast of the data from one proc instead of
* having all the processes involved in the collective I/O.
*/
- local_cause[1] |= H5D_MPIO_RANK0_NOT_CONTIGUOUS;
+ local_cause[1] |= H5D_MPIO_RANK0_NOT_CONTIGUOUS;
else if((is_vl_storage = H5T_is_vl_storage(type_info->dset_type)) < 0)
local_cause[0] |= H5D_MPIO_ERROR_WHILE_CHECKING_COLLECTIVE_POSSIBLE;
else if(is_vl_storage)
@@ -393,7 +393,7 @@ H5D__mpio_opt_possible(const H5D_io_info_t *io_info, const H5S_t *file_space,
dset_size = ((hsize_t)snelmts) * type_size;
/* If the size of the dataset is less than 2GB then do an MPI_Bcast
- * of the data from one process instead of having all the processes
+ * of the data from one process instead of having all the processes
* involved in the collective I/O.
*/
if(dset_size > ((hsize_t)(2.0F * H5_GB) - 1))
@@ -401,7 +401,7 @@ H5D__mpio_opt_possible(const H5D_io_info_t *io_info, const H5S_t *file_space,
} /* end else */
} /* end else */
} /* end else */
-
+
/* Check for independent I/O */
if(local_cause[0] & H5D_MPIO_SET_INDEPENDENT)
global_cause[0] = local_cause[0];
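The rank-0 broadcast optimization gated in this hunk is, in outline, the following (helper name hypothetical):

    /* Rank 0 reads the whole small, contiguous dataset; everyone else
     * receives it via broadcast instead of doing a collective read */
    if(0 == mpi_rank)
        read_dataset_locally(buf, dset_size);   /* hypothetical helper */
    MPI_Bcast(buf, (int)dset_size, MPI_BYTE, 0, comm);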
@@ -812,7 +812,7 @@ H5D__chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
io_option = H5D_ONE_LINK_CHUNK_IO; /*no opt*/
/* direct request to multi-chunk-io */
else if(H5FD_MPIO_CHUNK_MULTI_IO == chunk_opt_mode)
- io_option = H5D_MULTI_CHUNK_IO;
+ io_option = H5D_MULTI_CHUNK_IO;
/* via default path. branch by num threshold */
else {
unsigned one_link_chunk_io_threshold; /* Threshold to use single collective I/O for all chunks */
@@ -1123,9 +1123,9 @@ if(H5DEBUG(D))
/* Obtain MPI derived datatype from all individual chunks */
for(u = 0; u < num_chunk; u++) {
- hsize_t *permute_map = NULL; /* array that holds the mapping from the old,
- out-of-order displacements to the in-order
- displacements of the MPI datatypes of the
+ hsize_t *permute_map = NULL; /* array that holds the mapping from the old,
+ out-of-order displacements to the in-order
+ displacements of the MPI datatypes of the
point selection of the file space */
hbool_t is_permuted = FALSE;
@@ -1135,8 +1135,8 @@ if(H5DEBUG(D))
* where it will be freed.
*/
if(H5S_mpio_space_type(chunk_addr_info_array[u].chunk_info.fspace,
- type_info->src_type_size,
- &chunk_ftype[u], /* OUT: datatype created */
+ type_info->src_type_size,
+ &chunk_ftype[u], /* OUT: datatype created */
&chunk_mpi_file_counts[u], /* OUT */
&(chunk_mft_is_derived_array[u]), /* OUT */
TRUE, /* this is a file space,
@@ -1154,9 +1154,9 @@ if(H5DEBUG(D))
if(is_permuted)
HDassert(permute_map);
if(H5S_mpio_space_type(chunk_addr_info_array[u].chunk_info.mspace,
- type_info->dst_type_size, &chunk_mtype[u],
- &chunk_mpi_mem_counts[u],
- &(chunk_mbt_is_derived_array[u]),
+ type_info->dst_type_size, &chunk_mtype[u],
+ &chunk_mpi_mem_counts[u],
+ &(chunk_mbt_is_derived_array[u]),
FALSE, /* this is a memory
space, so if the file
space is not
@@ -2016,9 +2016,9 @@ H5D__inter_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
if((file_space != NULL) && (mem_space != NULL)) {
int mpi_file_count; /* Number of file "objects" to transfer */
- hsize_t *permute_map = NULL; /* array that holds the mapping from the old,
- out-of-order displacements to the in-order
- displacements of the MPI datatypes of the
+ hsize_t *permute_map = NULL; /* array that holds the mapping from the old,
+ out-of-order displacements to the in-order
+ displacements of the MPI datatypes of the
point selection of the file space */
hbool_t is_permuted = FALSE;
@@ -2027,8 +2027,8 @@ H5D__inter_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
* and will be fed into the next call to H5S_mpio_space_type
* where it will be freed.
*/
- if(H5S_mpio_space_type(file_space, type_info->src_type_size,
- &mpi_file_type, &mpi_file_count, &mft_is_derived, /* OUT: datatype created */
+ if(H5S_mpio_space_type(file_space, type_info->src_type_size,
+ &mpi_file_type, &mpi_file_count, &mft_is_derived, /* OUT: datatype created */
TRUE, /* this is a file space, so
permute the datatype if the
point selection is out of
@@ -2037,13 +2037,13 @@ H5D__inter_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
the permutation of
points selected in
case they are out of
- order */
+ order */
&is_permuted /* OUT */) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI file type")
/* Sanity check */
if(is_permuted)
HDassert(permute_map);
- if(H5S_mpio_space_type(mem_space, type_info->src_type_size,
+ if(H5S_mpio_space_type(mem_space, type_info->src_type_size,
&mpi_buf_type, &mpi_buf_count, &mbt_is_derived, /* OUT: datatype created */
FALSE, /* this is a memory space, so if
the file space is not
@@ -2055,7 +2055,7 @@ H5D__inter_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
generated by the
file_space selection
and applied to the
- memory selection */,
+ memory selection */,
&is_permuted /* IN */) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI buffer type")
/* Sanity check */
@@ -2625,7 +2625,7 @@ H5D__construct_filtered_io_info_list(const H5D_io_info_t *io_info, const H5D_typ
local_info_array[i].num_writers = 0;
local_info_array[i].owners.original_owner = local_info_array[i].owners.new_owner = mpi_rank;
local_info_array[i].buf = NULL;
-
+
local_info_array[i].async_info.num_receive_requests = 0;
local_info_array[i].async_info.receive_buffer_array = NULL;
local_info_array[i].async_info.receive_requests_array = NULL;
@@ -2741,7 +2741,7 @@ H5D__chunk_redistribute_shared_chunks(const H5D_io_info_t *io_info, const H5D_ty
HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi rank")
if ((mpi_size = H5F_mpi_get_size(io_info->dset->oloc.file)) < 0)
HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi size")
-
+
/* Set to latest format for encoding dataspace */
H5CX_set_libver_bounds(NULL);
diff --git a/src/H5Dnone.c b/src/H5Dnone.c
index 09e781e..83451ab 100644
--- a/src/H5Dnone.c
+++ b/src/H5Dnone.c
@@ -14,7 +14,7 @@
/* Programmer: Vailin Choi <vchoi@hdfgroup.org>
* September 2010
*
- * Purpose: Implicit (Non Index) chunked I/O functions.
+ * Purpose: Implicit (Non Index) chunked I/O functions.
* This is used when the dataset is:
* extendible but with fixed max. dims
* with early allocation
@@ -114,8 +114,8 @@ const H5D_chunk_ops_t H5D_COPS_NONE[1] = {{
* Function: H5D__none_idx_create
*
* Purpose: Allocate memory for the maximum # of chunks in the dataset.
- *
- * Return: Non-negative on success
+ *
+ * Return: Non-negative on success
* Negative on failure.
*
* Programmer: Vailin Choi; September 2010
@@ -471,7 +471,7 @@ H5D__none_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_addr)
/*-------------------------------------------------------------------------
* Function: H5D__none_idx_dump
*
- * Purpose: Dump
+ * Purpose: Dump
*
* Return: Non-negative on success/Negative on failure
*
diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h
index 2dcbeae..e5f4e07 100644
--- a/src/H5Dpublic.h
+++ b/src/H5Dpublic.h
@@ -149,7 +149,7 @@ H5_DLL herr_t H5Dread(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id,
hid_t file_space_id, hid_t plist_id, void *buf/*out*/);
H5_DLL herr_t H5Dwrite(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id,
hid_t file_space_id, hid_t plist_id, const void *buf);
-H5_DLL herr_t H5Dwrite_chunk(hid_t dset_id, hid_t dxpl_id, uint32_t filters,
+H5_DLL herr_t H5Dwrite_chunk(hid_t dset_id, hid_t dxpl_id, uint32_t filters,
const hsize_t *offset, size_t data_size, const void *buf);
H5_DLL herr_t H5Dread_chunk(hid_t dset_id, hid_t dxpl_id,
const hsize_t *offset, uint32_t *filters, void *buf);
@@ -193,7 +193,7 @@ H5_DLL herr_t H5Dget_chunk_index_type(hid_t did, H5D_chunk_index_t *idx_type);
#define H5D_XFER_DIRECT_CHUNK_READ_FLAG_NAME "direct_chunk_read_flag"
#define H5D_XFER_DIRECT_CHUNK_READ_OFFSET_NAME "direct_chunk_read_offset"
#define H5D_XFER_DIRECT_CHUNK_READ_FILTERS_NAME "direct_chunk_read_filters"
-
+
/* Typedefs */
diff --git a/src/H5Dsingle.c b/src/H5Dsingle.c
index 321d258..d58b6d2 100644
--- a/src/H5Dsingle.c
+++ b/src/H5Dsingle.c
@@ -14,7 +14,7 @@
/* Programmer: Vailin Choi <vchoi@hdfgroup.org>
* May 2011; updated 10/2015
*
- * Purpose: Single Chunk I/O functions.
+ * Purpose: Single Chunk I/O functions.
* This is used when the dataset has only 1 chunk (with or without filter):
* cur_dims[] is equal to max_dims[] is equal to the chunk dims[]
* non-filter chunk record: [address of the chunk]
@@ -150,8 +150,8 @@ H5D__single_idx_init(const H5D_chk_idx_info_t *idx_info,
* Function: H5D__single_idx_create
*
* Purpose: Set up Single Chunk Index: filtered or non-filtered
- *
- * Return: Non-negative on success
+ *
+ * Return: Non-negative on success
* Negative on failure.
*
* Programmer: Vailin Choi; July 2011
diff --git a/src/H5EAcache.c b/src/H5EAcache.c
index baaa99b..31f7b00 100644
--- a/src/H5EAcache.c
+++ b/src/H5EAcache.c
@@ -116,7 +116,7 @@ static herr_t H5EA__cache_dblk_page_get_initial_load_size(void *udata, size_t *i
static htri_t H5EA__cache_dblk_page_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5EA__cache_dblk_page_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
-static herr_t H5EA__cache_dblk_page_image_len(const void *thing,
+static herr_t H5EA__cache_dblk_page_image_len(const void *thing,
size_t *image_len);
static herr_t H5EA__cache_dblk_page_serialize(const H5F_t *f, void *image, size_t len,
void *thing);
@@ -1466,12 +1466,12 @@ H5EA__cache_dblock_get_initial_load_size(void *_udata, size_t *image_len))
HDmemset(&dblock, 0, sizeof(dblock));
/* need to set:
- *
+ *
* dblock.hdr
* dblock.npages
* dblock.nelmts
*
- * before we invoke either H5EA_DBLOCK_PREFIX_SIZE() or
+ * before we invoke either H5EA_DBLOCK_PREFIX_SIZE() or
* H5EA_DBLOCK_SIZE().
*/
dblock.hdr = udata->hdr;
@@ -1563,7 +1563,7 @@ H5EA__cache_dblock_deserialize(const void *_image, size_t len,
if(NULL == (dblock = H5EA__dblock_alloc(udata->hdr, udata->parent, udata->nelmts)))
H5E_THROW(H5E_CANTALLOC, "memory allocation failed for extensible array data block")
- HDassert(((!dblock->npages) && (len == H5EA_DBLOCK_SIZE(dblock))) ||
+ HDassert(((!dblock->npages) && (len == H5EA_DBLOCK_SIZE(dblock))) ||
(len == H5EA_DBLOCK_PREFIX_SIZE(dblock)));
/* Set the extensible array data block's information */
diff --git a/src/H5F.c b/src/H5F.c
index 69d3e79..b04db89 100644
--- a/src/H5F.c
+++ b/src/H5F.c
@@ -1458,7 +1458,7 @@ done:
* Function: H5Fset_libver_bounds
*
* Purpose: Set to a different low and high bounds while a file is open.
- * This public routine is introduced in place of
+ * This public routine is introduced in place of
* H5Fset_latest_format() starting release 1.10.2.
* See explanation for H5Fset_latest_format() in H5Fdeprec.c.
*
diff --git a/src/H5FAcache.c b/src/H5FAcache.c
index ef2e5e3..fb00d2f 100644
--- a/src/H5FAcache.c
+++ b/src/H5FAcache.c
@@ -668,7 +668,7 @@ H5FA__cache_dblock_deserialize(const void *_image, size_t len,
if(NULL == (dblock = H5FA__dblock_alloc(udata->hdr)))
H5E_THROW(H5E_CANTALLOC, "memory allocation failed for fixed array data block")
- HDassert(((!dblock->npages) && (len == (size_t)H5FA_DBLOCK_SIZE(dblock)))
+ HDassert(((!dblock->npages) && (len == (size_t)H5FA_DBLOCK_SIZE(dblock)))
|| (len == (size_t)H5FA_DBLOCK_PREFIX_SIZE(dblock)));
/* Set the fixed array data block's information */
@@ -953,21 +953,21 @@ END_FUNC(STATIC) /* end H5FA__cache_dblock_free_icr() */
* to free when a dblock entry is destroyed with the free
* file space block set.
*
- * This function is needed when the data block is paged, as
+ * This function is needed when the data block is paged, as
 * the data block header and all its pages are allocated as a
- * single contiguous chunk of file space, and must be
+ * single contiguous chunk of file space, and must be
* deallocated the same way.
*
 * The size of the chunk of memory holding the dblock
 * header and all its pages is stored in the size field,
* so we simply pass that value back to the cache.
*
- * If the datablock is not paged, then the size field of
+ * If the datablock is not paged, then the size field of
 * the cache_info contains the correct size. However, this
* value will be the same as the size field, so we return
* the contents of the size field to the cache in this case
* as well.
- *
+ *
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
@@ -1157,7 +1157,7 @@ H5FA__cache_dblk_page_image_len(const void *_thing, size_t *image_len))
HDassert(image_len);
/* Set the image length size */
- *image_len = dblk_page->size;
+ *image_len = dblk_page->size;
END_FUNC(STATIC) /* end H5FA__cache_dblk_page_image_len() */
diff --git a/src/H5FDcore.c b/src/H5FDcore.c
index 805d54e..e4a3207 100644
--- a/src/H5FDcore.c
+++ b/src/H5FDcore.c
@@ -81,7 +81,7 @@ typedef struct H5FD_core_t {
DWORD nFileIndexLow;
DWORD nFileIndexHigh;
DWORD dwVolumeSerialNumber;
-
+
HANDLE hFile; /* Native windows file handle */
#endif /* H5_HAVE_WIN32_API */
hbool_t dirty; /* changes not saved? */
@@ -400,7 +400,7 @@ done:
*
* Purpose: Initializes any interface-specific data or routines.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
*-------------------------------------------------------------------------
*/
@@ -650,9 +650,9 @@ H5FD__core_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr
if((file_image_info.buffer != NULL) && !(H5F_ACC_CREAT & flags)) {
if(HDopen(name, o_flags, H5_POSIX_CREATE_MODE_RW) >= 0)
HGOTO_ERROR(H5E_FILE, H5E_FILEEXISTS, NULL, "file already exists")
-
+
/* If backing store is requested, create and stat the file
- * Note: We are forcing the O_CREAT flag here, even though this is
+ * Note: We are forcing the O_CREAT flag here, even though this is
* technically an open.
*/
if(fa->backing_store) {
@@ -749,14 +749,14 @@ H5FD__core_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr
/* Read in existing data, being careful of interrupted system calls,
* partial results, and the end of the file.
*/
-
+
uint8_t *mem = file->mem; /* memory pointer for writes */
HDoff_t offset = (HDoff_t)0; /* offset for reading */
-
+
while(size > 0) {
h5_posix_io_t bytes_in = 0; /* # of bytes to read */
h5_posix_io_ret_t bytes_read = -1; /* # of bytes actually read */
-
+
/* Trying to read more bytes than the return type can handle is
* undefined behavior in POSIX.
*/
@@ -764,7 +764,7 @@ H5FD__core_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr
bytes_in = H5_POSIX_MAX_IO_BYTES;
else
bytes_in = (h5_posix_io_t)size;
-
+
do {
#ifdef H5_HAVE_PREADWRITE
bytes_read = HDpread(file->fd, mem, bytes_in, offset);
@@ -774,7 +774,7 @@ H5FD__core_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr
bytes_read = HDread(file->fd, mem, bytes_in);
#endif /* H5_HAVE_PREADWRITE */
} while(-1 == bytes_read && EINTR == errno);
-
+
if(-1 == bytes_read) { /* error */
int myerrno = errno;
time_t mytime = HDtime(NULL);
@@ -783,10 +783,10 @@ H5FD__core_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr
HGOTO_ERROR(H5E_IO, H5E_READERROR, NULL, "file read failed: time = %s, filename = '%s', file descriptor = %d, errno = %d, error message = '%s', file->mem = %p, total read size = %llu, bytes this sub-read = %llu, bytes actually read = %llu, offset = %llu", HDctime(&mytime), file->name, file->fd, myerrno, HDstrerror(myerrno), file->mem, (unsigned long long)size, (unsigned long long)bytes_in, (unsigned long long)bytes_read, (unsigned long long)offset);
} /* end if */
-
+
HDassert(bytes_read >= 0);
HDassert((size_t)bytes_read <= size);
-
+
mem += bytes_read;
size -= (size_t)bytes_read;
} /* end while */
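The loop above is the standard EINTR-safe short-read pattern; a self-contained sketch of the same idea:

    #include <errno.h>
    #include <unistd.h>

    /* Read `size` bytes at `offset`, retrying on EINTR and coping with
     * short reads; returns 0 on success, -1 on error or premature EOF */
    static int
    read_fully(int fd, unsigned char *mem, size_t size, off_t offset)
    {
        while(size > 0) {
            ssize_t bytes_read;

            do {
                bytes_read = pread(fd, mem, size, offset);
            } while(-1 == bytes_read && EINTR == errno);

            if(bytes_read <= 0)
                return -1;

            mem    += bytes_read;
            offset += bytes_read;
            size   -= (size_t)bytes_read;
        }
        return 0;
    }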
@@ -813,7 +813,7 @@ H5FD__core_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr
* on open (when not read-only).
*/
/* Only use write tracking if the file is open for writing */
- use_write_tracking =
+ use_write_tracking =
TRUE == write_tracking_flag /* user asked for write tracking */
&& !(o_flags & O_RDONLY) /* file is open for writing (i.e. not read-only) */
&& file->bstore_page_size != 0; /* page size is not zero */
@@ -1367,24 +1367,24 @@ done:
* than the end-of-address.
*
* Addendum -- 12/2/11
- * For file images opened with the core file driver, it is
+ * For file images opened with the core file driver, it is
* necessary that we avoid reallocating the core file driver's
 * buffer unnecessarily.
*
* To this end, I have made the following functional changes
- * to this function.
+ * to this function.
*
- * If we are closing, and there is no backing store, this
+ * If we are closing, and there is no backing store, this
* function becomes a no-op.
*
* If we are closing, and there is backing store, we set the
- * eof to equal the eoa, and truncate the backing store to
+ * eof to equal the eoa, and truncate the backing store to
* the new eof
*
- * If we are not closing, we realloc the buffer to size equal
- * to the smallest multiple of the allocation increment that
- * equals or exceeds the eoa and set the eof accordingly.
- * Note that we no longer truncate the backing store to the
+ * If we are not closing, we realloc the buffer to a size equal
+ * to the smallest multiple of the allocation increment that
+ * equals or exceeds the eoa and set the eof accordingly.
+ * Note that we no longer truncate the backing store to the
* new eof if applicable.
* -- JRM
*
diff --git a/src/H5FDhdfs.h b/src/H5FDhdfs.h
index 3d4128d..37252f0 100644
--- a/src/H5FDhdfs.h
+++ b/src/H5FDhdfs.h
@@ -43,7 +43,7 @@ extern "C" {
*
* `version` (int32_t)
*
- * Version number of the `H5FD_hdfs_fapl_t` structure. Any instance passed
+ * Version number of the `H5FD_hdfs_fapl_t` structure. Any instance passed
* to the above calls must have a recognized version number, or an error
* will be flagged.
*
diff --git a/src/H5FDint.c b/src/H5FDint.c
index 023ff57..b46ecf2 100644
--- a/src/H5FDint.c
+++ b/src/H5FDint.c
@@ -183,7 +183,7 @@ H5FD_read(H5FD_t *file, H5FD_mem_t type, haddr_t addr, size_t size,
HGOTO_DONE(SUCCEED)
#endif /* H5_HAVE_PARALLEL */
- /*
+ /*
* If the file is open for SWMR read access, allow access to data past
* the end of the allocated space (the 'eoa'). This is done because the
* eoa stored in the file's superblock might be out of sync with the
@@ -250,7 +250,7 @@ H5FD_write(H5FD_t *file, H5FD_mem_t type, haddr_t addr, size_t size,
if(HADDR_UNDEF == (eoa = (file->cls->get_eoa)(file, type)))
HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "driver get_eoa request failed")
if((addr + file->base_addr + size) > eoa)
- HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, "addr overflow, addr = %llu, size=%llu, eoa=%llu",
+ HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, "addr overflow, addr = %llu, size=%llu, eoa=%llu",
(unsigned long long)(addr+ file->base_addr), (unsigned long long)size, (unsigned long long)eoa)
/* Dispatch to driver */
@@ -412,7 +412,7 @@ H5FD_driver_query(const H5FD_class_t *driver, unsigned long *flags/*out*/)
/* Check for the driver to query and then query it */
if(driver->query)
ret_value = (driver->query)(NULL, flags);
- else
+ else
*flags = 0;
FUNC_LEAVE_NOAPI(ret_value)
diff --git a/src/H5FDlog.c b/src/H5FDlog.c
index fc9d7c8..0e50df0 100644
--- a/src/H5FDlog.c
+++ b/src/H5FDlog.c
@@ -108,7 +108,7 @@ typedef struct H5FD_log_t {
DWORD nFileIndexLow;
DWORD nFileIndexHigh;
DWORD dwVolumeSerialNumber;
-
+
HANDLE hFile; /* Native windows file handle */
#endif /* H5_HAVE_WIN32_API */
@@ -329,8 +329,8 @@ H5Pset_fapl_log(hid_t fapl_id, const char *logfile, unsigned long long flags, si
if(NULL == (plist = H5P_object_verify(fapl_id, H5P_FILE_ACCESS)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file access property list")
- /* This shallow copy is correct! The string will be properly
- * copied deep down in the H5P code.
+ /* This shallow copy is correct! The string will be properly
+ * copied deep down in the H5P code.
*/
fa.logfile = (char *)logfile;
@@ -1247,7 +1247,7 @@ H5FD_log_read(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_id, hadd
while(size > 0) {
h5_posix_io_t bytes_in = 0; /* # of bytes to read */
- h5_posix_io_ret_t bytes_read = -1; /* # of bytes actually read */
+ h5_posix_io_ret_t bytes_read = -1; /* # of bytes actually read */
/* Trying to read more bytes than the return type can handle is
* undefined behavior in POSIX.
@@ -1287,7 +1287,7 @@ H5FD_log_read(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_id, hadd
HDassert(bytes_read >= 0);
HDassert((size_t)bytes_read <= size);
-
+
size -= (size_t)bytes_read;
addr += (haddr_t)bytes_read;
buf = (char *)buf + bytes_read;
@@ -1464,7 +1464,7 @@ H5FD_log_write(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_id, had
while(size > 0) {
h5_posix_io_t bytes_in = 0; /* # of bytes to write */
- h5_posix_io_ret_t bytes_wrote = -1; /* # of bytes written */
+ h5_posix_io_ret_t bytes_wrote = -1; /* # of bytes written */
/* Trying to write more bytes than the return type can handle is
* undefined behavior in POSIX.
diff --git a/src/H5FDmpi.c b/src/H5FDmpi.c
index 4b42a73..eb12b93 100644
--- a/src/H5FDmpi.c
+++ b/src/H5FDmpi.c
@@ -315,11 +315,11 @@ H5FD_mpi_comm_info_dup(MPI_Comm comm, MPI_Info info, MPI_Comm *comm_new, MPI_Inf
info_dup = info;
}
- /* Set MPI_ERRORS_RETURN on comm_dup so that MPI failures are not fatal,
+ /* Set MPI_ERRORS_RETURN on comm_dup so that MPI failures are not fatal,
and return codes can be checked and handled. May 23, 2017 FTW */
if (MPI_SUCCESS != (mpi_code = MPI_Comm_set_errhandler(comm_dup, MPI_ERRORS_RETURN)))
HMPI_GOTO_ERROR(FAIL, "MPI_Errhandler_set failed", mpi_code)
-
+
/* copy them to the return arguments */
*comm_new = comm_dup;
*info_new = info_dup;
diff --git a/src/H5FDmulti.c b/src/H5FDmulti.c
index aa1b118..4e24fda 100644
--- a/src/H5FDmulti.c
+++ b/src/H5FDmulti.c
@@ -92,7 +92,7 @@ typedef struct H5FD_multi_t {
haddr_t memb_next[H5FD_MEM_NTYPES]; /*addr of next member */
H5FD_t *memb[H5FD_MEM_NTYPES]; /*member pointers */
haddr_t memb_eoa[H5FD_MEM_NTYPES]; /*EOA for individual files,
- *end of allocated addresses. v1.6 library
+ *end of allocated addresses. The v1.6 library
 *has the EOA for the entire file. But it's
 *meaningless for a MULTI file. We replaced it
*with the EOAs for individual files */
@@ -836,9 +836,9 @@ H5FD_multi_sb_decode(H5FD_t *_file, const char *name, const unsigned char *buf)
if (file->memb[mt])
if(H5FDset_eoa(file->memb[mt], mt, memb_eoa[mt])<0)
H5Epush_ret(func, H5E_ERR_CLS, H5E_INTERNAL, H5E_CANTSET, "set_eoa() failed", -1)
-
- /* Save the individual EOAs in one place for later comparison (in H5FD_multi_set_eoa) */
- file->memb_eoa[mt] = memb_eoa[mt];
+
+ /* Save the individual EOAs in one place for later comparison (in H5FD_multi_set_eoa) */
+ file->memb_eoa[mt] = memb_eoa[mt];
} END_MEMBERS;
return 0;
@@ -1007,7 +1007,7 @@ H5FD_multi_open(const char *name, unsigned flags, hid_t fapl_id,
/*
* Initialize the file from the file access properties, using default
* values if necessary. Make sure to use CALLOC here because the code
- * in H5FD_multi_set_eoa depends on the proper initialization of memb_eoa
+ * in H5FD_multi_set_eoa depends on the proper initialization of memb_eoa
* in H5FD_multi_t.
*/
if(NULL == (file = (H5FD_multi_t *)calloc((size_t)1, sizeof(H5FD_multi_t))))
diff --git a/src/H5FDprivate.h b/src/H5FDprivate.h
index 0430064..c4f8006 100644
--- a/src/H5FDprivate.h
+++ b/src/H5FDprivate.h
@@ -130,7 +130,7 @@ H5_DLL H5FD_t *H5FD_open(const char *name, unsigned flags, hid_t fapl_id,
H5_DLL herr_t H5FD_close(H5FD_t *file);
H5_DLL int H5FD_cmp(const H5FD_t *f1, const H5FD_t *f2);
H5_DLL herr_t H5FD_driver_query(const H5FD_class_t *driver, unsigned long *flags/*out*/);
-H5_DLL haddr_t H5FD_alloc(H5FD_t *file, H5FD_mem_t type,
+H5_DLL haddr_t H5FD_alloc(H5FD_t *file, H5FD_mem_t type,
struct H5F_t *f, hsize_t size, haddr_t *frag_addr, hsize_t *frag_size);
H5_DLL herr_t H5FD_free(H5FD_t *file, H5FD_mem_t type, struct H5F_t *f,
haddr_t addr, hsize_t size);
diff --git a/src/H5FDpublic.h b/src/H5FDpublic.h
index 514d1bf..54ca8f7 100644
--- a/src/H5FDpublic.h
+++ b/src/H5FDpublic.h
@@ -207,7 +207,7 @@ typedef enum H5F_mem_t H5FD_mem_t;
* the handle for the VFD (returned with the 'get_handle' callback) is
* of type 'int' and is compatible with POSIX I/O calls.
*/
-#define H5FD_FEAT_POSIX_COMPAT_HANDLE 0x00000080
+#define H5FD_FEAT_POSIX_COMPAT_HANDLE 0x00000080
/*
* Defining H5FD_FEAT_HAS_MPI for a VFL driver means that
* the driver makes use of MPI communication and code may retrieve
@@ -220,7 +220,7 @@ typedef enum H5F_mem_t H5FD_mem_t;
* instead of the default H5D_ALLOC_TIME_LATE
*/
#define H5FD_FEAT_ALLOCATE_EARLY 0x00000200
- /*
+ /*
* Defining H5FD_FEAT_ALLOW_FILE_IMAGE for a VFL driver means that
* the driver is able to use a file image in the fapl as the initial
* contents of a file.
@@ -332,7 +332,7 @@ struct H5FD_t {
/* Define enum for the source of file image callbacks */
typedef enum {
H5FD_FILE_IMAGE_OP_NO_OP,
- H5FD_FILE_IMAGE_OP_PROPERTY_LIST_SET,
+ H5FD_FILE_IMAGE_OP_PROPERTY_LIST_SET,
H5FD_FILE_IMAGE_OP_PROPERTY_LIST_COPY,
H5FD_FILE_IMAGE_OP_PROPERTY_LIST_GET,
H5FD_FILE_IMAGE_OP_PROPERTY_LIST_CLOSE,
@@ -343,13 +343,13 @@ typedef enum {
/* Define structure to hold file image callbacks */
typedef struct {
- void *(*image_malloc)(size_t size, H5FD_file_image_op_t file_image_op,
+ void *(*image_malloc)(size_t size, H5FD_file_image_op_t file_image_op,
void *udata);
void *(*image_memcpy)(void *dest, const void *src, size_t size,
H5FD_file_image_op_t file_image_op, void *udata);
- void *(*image_realloc)(void *ptr, size_t size,
+ void *(*image_realloc)(void *ptr, size_t size,
H5FD_file_image_op_t file_image_op, void *udata);
- herr_t (*image_free)(void *ptr, H5FD_file_image_op_t file_image_op,
+ herr_t (*image_free)(void *ptr, H5FD_file_image_op_t file_image_op,
void *udata);
void *(*udata_copy)(void *udata);
herr_t (*udata_free)(void *udata);
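A minimal sketch of a callback set that simply forwards to the C allocator, installed with H5Pset_file_image_callbacks(); real implementations inspect the H5FD_file_image_op_t value to distinguish property-list copies from file-open transfers:

    #include <stdlib.h>
    #include <string.h>

    static void *
    img_malloc(size_t size, H5FD_file_image_op_t op, void *udata)
    { (void)op; (void)udata; return malloc(size); }

    static void *
    img_memcpy(void *dest, const void *src, size_t size,
        H5FD_file_image_op_t op, void *udata)
    { (void)op; (void)udata; return memcpy(dest, src, size); }

    static void *
    img_realloc(void *ptr, size_t size, H5FD_file_image_op_t op, void *udata)
    { (void)op; (void)udata; return realloc(ptr, size); }

    static herr_t
    img_free(void *ptr, H5FD_file_image_op_t op, void *udata)
    { (void)op; (void)udata; free(ptr); return 0; }

    H5FD_file_image_callbacks_t callbacks =
        { img_malloc, img_memcpy, img_realloc, img_free,
          NULL /* udata_copy */, NULL /* udata_free */, NULL /* udata */ };

    /* ... then: H5Pset_file_image_callbacks(fapl_id, &callbacks); */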
diff --git a/src/H5FDsec2.c b/src/H5FDsec2.c
index 974a775..b316666 100644
--- a/src/H5FDsec2.c
+++ b/src/H5FDsec2.c
@@ -85,7 +85,7 @@ typedef struct H5FD_sec2_t {
DWORD nFileIndexLow;
DWORD nFileIndexHigh;
DWORD dwVolumeSerialNumber;
-
+
HANDLE hFile; /* Native windows file handle */
#endif /* H5_HAVE_WIN32_API */
@@ -599,7 +599,7 @@ H5FD_sec2_set_eoa(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, haddr_t addr)
* either the filesystem end-of-file or the HDF5 end-of-address
* markers.
*
- * Return: End of file address, the first address past the end of the
+ * Return: End of file address, the first address past the end of the
* "file", either the filesystem file or the HDF5 file.
*
* Programmer: Robb Matzke
@@ -716,7 +716,7 @@ H5FD_sec2_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNUS
bytes_read = HDread(file->fd, buf, bytes_in);
#endif /* H5_HAVE_PREADWRITE */
} while(-1 == bytes_read && EINTR == errno);
-
+
if(-1 == bytes_read) { /* error */
int myerrno = errno;
time_t mytime = HDtime(NULL);
@@ -725,16 +725,16 @@ H5FD_sec2_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNUS
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "file read failed: time = %s, filename = '%s', file descriptor = %d, errno = %d, error message = '%s', buf = %p, total read size = %llu, bytes this sub-read = %llu, bytes actually read = %llu, offset = %llu", HDctime(&mytime), file->filename, file->fd, myerrno, HDstrerror(myerrno), buf, (unsigned long long)size, (unsigned long long)bytes_in, (unsigned long long)bytes_read, (unsigned long long)offset);
} /* end if */
-
+
if(0 == bytes_read) {
/* end of file but not end of format address space */
HDmemset(buf, 0, size);
break;
} /* end if */
-
+
HDassert(bytes_read >= 0);
HDassert((size_t)bytes_read <= size);
-
+
size -= (size_t)bytes_read;
addr += (haddr_t)bytes_read;
buf = (char *)buf + bytes_read;
@@ -802,7 +802,7 @@ H5FD_sec2_write(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU
while(size > 0) {
h5_posix_io_t bytes_in = 0; /* # of bytes to write */
- h5_posix_io_ret_t bytes_wrote = -1; /* # of bytes written */
+ h5_posix_io_ret_t bytes_wrote = -1; /* # of bytes written */
/* Trying to write more bytes than the return type can handle is
* undefined behavior in POSIX.
@@ -821,7 +821,7 @@ H5FD_sec2_write(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU
bytes_wrote = HDwrite(file->fd, buf, bytes_in);
#endif /* H5_HAVE_PREADWRITE */
} while(-1 == bytes_wrote && EINTR == errno);
-
+
if(-1 == bytes_wrote) { /* error */
int myerrno = errno;
time_t mytime = HDtime(NULL);
@@ -830,7 +830,7 @@ H5FD_sec2_write(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "file write failed: time = %s, filename = '%s', file descriptor = %d, errno = %d, error message = '%s', buf = %p, total write size = %llu, bytes this sub-write = %llu, bytes actually written = %llu, offset = %llu", HDctime(&mytime), file->filename, file->fd, myerrno, HDstrerror(myerrno), buf, (unsigned long long)size, (unsigned long long)bytes_in, (unsigned long long)bytes_wrote, (unsigned long long)offset);
} /* end if */
-
+
HDassert(bytes_wrote > 0);
HDassert((size_t)bytes_wrote <= size);
diff --git a/src/H5FDstdio.c b/src/H5FDstdio.c
index 861c6a6..d29a1b4 100644
--- a/src/H5FDstdio.c
+++ b/src/H5FDstdio.c
@@ -110,7 +110,7 @@ typedef struct H5FD_stdio_t {
DWORD nFileIndexLow;
DWORD nFileIndexHigh;
DWORD dwVolumeSerialNumber;
-
+
HANDLE hFile; /* Native windows file handle */
#endif /* H5_HAVE_WIN32_API */
} H5FD_stdio_t;
@@ -824,13 +824,13 @@ H5FD_stdio_read(H5FD_t *_file, H5FD_mem_t /*UNUSED*/ type, hid_t /*UNUSED*/ dxpl
file->pos = HADDR_UNDEF;
H5Epush_ret(func, H5E_ERR_CLS, H5E_IO, H5E_READERROR, "fread failed", -1)
} /* end if */
-
+
if(0 == bytes_read && feof(file->fp)) {
/* end of file but not end of format address space */
memset((unsigned char *)buf, 0, size);
break;
} /* end if */
-
+
size -= bytes_read;
addr += (haddr_t)bytes_read;
buf = (char *)buf + bytes_read;
@@ -914,7 +914,7 @@ H5FD_stdio_write(H5FD_t *_file, H5FD_mem_t /*UNUSED*/ type, hid_t /*UNUSED*/ dxp
file->pos = HADDR_UNDEF;
H5Epush_ret(func, H5E_ERR_CLS, H5E_IO, H5E_WRITEERROR, "fwrite failed", -1)
} /* end if */
-
+
assert(bytes_wrote > 0);
assert((size_t)bytes_wrote <= size);
@@ -1040,7 +1040,7 @@ H5FD_stdio_truncate(H5FD_t *_file, hid_t /*UNUSED*/ dxpl_id,
if(dwError != NO_ERROR )
H5Epush_ret(func, H5E_ERR_CLS, H5E_FILE, H5E_FILEOPEN, "unable to set file pointer", -1)
}
-
+
bError = SetEndOfFile(file->hFile);
if(0 == bError)
H5Epush_ret(func, H5E_ERR_CLS, H5E_IO, H5E_SEEKERROR, "unable to truncate/extend file properly", -1)
diff --git a/src/H5FS.c b/src/H5FS.c
index 0d1ba41..79915dc 100644
--- a/src/H5FS.c
+++ b/src/H5FS.c
@@ -1171,7 +1171,7 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5FS_sinfo_dest() */
-herr_t
+herr_t
H5FS_get_sect_count(const H5FS_t *frsp, hsize_t *tot_sect_count)
{
herr_t ret_value = SUCCEED; /* Return value */
diff --git a/src/H5FScache.c b/src/H5FScache.c
index aa061bc..470f42e 100644
--- a/src/H5FScache.c
+++ b/src/H5FScache.c
@@ -84,7 +84,7 @@ static void *H5FS__cache_hdr_deserialize(const void *image, size_t len,
static herr_t H5FS__cache_hdr_image_len(const void *thing, size_t *image_len);
static herr_t H5FS__cache_hdr_pre_serialize(H5F_t *f, void *thing, haddr_t addr,
size_t len, haddr_t *new_addr, size_t *new_len, unsigned *flags);
-static herr_t H5FS__cache_hdr_serialize(const H5F_t *f, void *image,
+static herr_t H5FS__cache_hdr_serialize(const H5F_t *f, void *image,
size_t len, void *thing);
static herr_t H5FS__cache_hdr_notify(H5AC_notify_action_t action, void *thing);
static herr_t H5FS__cache_hdr_free_icr(void *thing);
@@ -240,7 +240,7 @@ H5FS__cache_hdr_verify_chksum(const void *_image, size_t len, void H5_ATTR_UNUSE
*-------------------------------------------------------------------------
*/
static void *
-H5FS__cache_hdr_deserialize(const void *_image, size_t len, void *_udata,
+H5FS__cache_hdr_deserialize(const void *_image, size_t len, void *_udata,
hbool_t H5_ATTR_UNUSED *dirty)
{
H5FS_t *fspace = NULL; /* Free space header info */
@@ -302,8 +302,8 @@ H5FS__cache_hdr_deserialize(const void *_image, size_t len, void *_udata,
/* Expand percent */
UINT16DECODE(image, fspace->expand_percent);
- /* Size of address space free space sections are within
- * (log2 of actual value)
+ /* Size of the address space that free space sections are within
+ * (log2 of actual value)
*/
UINT16DECODE(image, fspace->max_sect_addr);
@@ -377,20 +377,20 @@ H5FS__cache_hdr_image_len(const void *_thing, size_t *image_len)
/*-------------------------------------------------------------------------
* Function: H5FS__cache_hdr_pre_serialize
*
- * Purpose: The free space manager header contains the address, size, and
+ * Purpose: The free space manager header contains the address, size, and
* allocation size of the free space manager section info. However,
* since it is possible for the section info to either not be allocated
 * at all, or be allocated in temporary (AKA imaginary) file space,
 * it is possible for the above-mentioned fields to contain gibberish
* when the free space manager header is serialized.
*
- * This function exists to prevent this problem. It does so by
+ * This function exists to prevent this problem. It does so by
* forcing allocation of real file space for the section information.
*
* Note that in the Version 2 cache, this problem was dealt with by
* simply flushing the section info before flushing the header. This
- * was possible, since the clients handled file I/O directly. As
- * this responsibility has moved to the cache in Version 3, this
+ * was possible, since the clients handled file I/O directly. As
+ * this responsibility has moved to the cache in Version 3, this
* solution is no longer directly applicable.
*
* Return: Success: SUCCEED
@@ -401,7 +401,7 @@ H5FS__cache_hdr_image_len(const void *_thing, size_t *image_len)
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5FS__cache_hdr_pre_serialize(H5F_t *f, void *_thing,
haddr_t addr, size_t H5_ATTR_UNUSED len, haddr_t *new_addr, size_t *new_len,
unsigned *flags)
@@ -432,23 +432,23 @@ H5FS__cache_hdr_pre_serialize(H5F_t *f, void *_thing,
/* Set the ring type for the section info in the API context */
H5AC_set_ring(ring, &orig_ring);
- /* This implies that the header "owns" the section info.
+ /* This implies that the header "owns" the section info.
*
- * Unfortunately, the comments in the code are not clear as to
+ * Unfortunately, the comments in the code are not clear as to
* what this means, but from reviewing the code (most particularly
- * H5FS_close(), H5FS_sinfo_lock, and H5FS_sinfo_unlock()), I
- * gather that it means that the header is maintaining a pointer to
- * an instance of H5FS_sinfo_t in which free space data is
+ * H5FS_close(), H5FS_sinfo_lock, and H5FS_sinfo_unlock()), I
+ * gather that it means that the header is maintaining a pointer to
+ * an instance of H5FS_sinfo_t in which free space data is
* maintained, and either:
*
* 1) The instance of H5FS_sinfo_t is not in the metadata cache.
*
- * This will be TRUE iff H5F_addr_defined(fspace->sect_addr)
+ * This will be TRUE iff H5F_addr_defined(fspace->sect_addr)
* is FALSE, and fspace->sinfo is not NULL. This is sometimes
* referred to as "floating" section info in the comments.
*
- * If the section info structure contains free space data
- * that must be placed on disk eventually, then
+ * If the section info structure contains free space data
+ * that must be placed on disk eventually, then
*
* fspace->serial_sect_count > 0
*
@@ -457,57 +457,57 @@ H5FS__cache_hdr_pre_serialize(H5F_t *f, void *_thing,
* H5F_addr_defined(fspace->addr)
*
 * will both be TRUE. If this condition does not hold, then
- * either the free space info is not persistent
- * (!H5F_addr_defined(fspace->addr)???) or the section info
- * contains no free space data that must be written to file
+ * either the free space info is not persistent
+ * (!H5F_addr_defined(fspace->addr)???) or the section info
+ * contains no free space data that must be written to file
* ( fspace->serial_sect_count == 0 ).
*
* 2) The instance of H5FS_sinfo_t is in the metadata cache with
* address in temporary file space (AKA imaginary file space).
- * The entry may or may not be protected, and if protected, it
- * may be protected either RW or RO (as indicated by
+ * The entry may or may not be protected, and if protected, it
+ * may be protected either RW or RO (as indicated by
* fspace->sinfo_protected and fspace->sinfo_accmod).
*
* 3) The instance of H5FS_sinfo_t is in the metadata cache with
* address in real file space. As in case 2) above, the entry
- * may or may not be protected, and if protected, it
- * may be protected either RW or RO (as indicated by
+ * may or may not be protected, and if protected, it
+ * may be protected either RW or RO (as indicated by
* fspace->sinfo_protected and fspace->sinfo_accmod).
*
- * Observe that fspace->serial_sect_count > 0 must be TRUE in
- * cases 2) and 3), as the section info should not be stored on
+ * Observe that fspace->serial_sect_count > 0 must be TRUE in
+ * cases 2) and 3), as the section info should not be stored on
* disk if it doesn't exist. Similarly, since the section info
- * will not be stored to disk unless the header is,
+ * will not be stored to disk unless the header is,
* H5F_addr_defined(fspace->addr) must hold as well.
*
* As the objective is to touch up the free space manager header
- * so that it contains sensical data on the size and location of
+ * so that it contains sensible data on the size and location of
* the section information, we have to handle each of the above
* cases differently.
*
- * Case 1) If either fspace->serial_sect_count == 0 or
- * ! H5F_addr_defined(fspace->addr) do nothing as either
- * the free space manager data is not persistent, or the
+ * Case 1) If either fspace->serial_sect_count == 0 or
+ * ! H5F_addr_defined(fspace->addr) do nothing as either
+ * the free space manager data is not persistent, or the
* section info is empty.
*
* Otherwise, allocate space for the section info in real
- * file space, insert the section info at this location, and
- * set fspace->sect_addr, fspace->sect_size, and
+ * file space, insert the section info at this location, and
+ * set fspace->sect_addr, fspace->sect_size, and
* fspace->alloc_sect_size to reflect the new location
* of the section info. Note that it is not necessary to
* force a write of the section info.
*
* Case 2) Allocate space for the section info in real file space,
- * and tell the metadata cache to relocate the entry.
- * Update fspace->sect_addr, fspace->sect_size, and
+ * and tell the metadata cache to relocate the entry.
+ * Update fspace->sect_addr, fspace->sect_size, and
* fspace->alloc_sect_size to reflect the new location.
*
* Case 3) Nothing to be done in this case, although it is useful
* to perform sanity checks.
*
- * Note that while we may alter the contents of the free space
- * header in cases 1) and 2), there is no need to mark the header
- * as dirty, as the metadata cache would not be attempting to
+ * Note that while we may alter the contents of the free space
+ * header in cases 1) and 2), there is no need to mark the header
+ * as dirty, as the metadata cache would not be attempting to
* serialize the header if it thought it was clean.
*/
if(fspace->serial_sect_count > 0 && H5F_addr_defined(fspace->addr)) {
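The three cases enumerated above reduce to a small dispatch on where the section info currently lives. A minimal stand-alone sketch of that decision, using a hypothetical pared-down struct (the sk_* names are illustrative, not the library's own):

    #include <stdbool.h>

    /* Hypothetical mirror of the fields cases 1-3 consult */
    struct fspace_sk {
        bool sinfo_in_memory;   /* fspace->sinfo != NULL                 */
        bool sect_addr_defined; /* H5F_addr_defined(fspace->sect_addr)   */
        bool sect_addr_is_tmp;  /* H5F_IS_TMP_ADDR(f, fspace->sect_addr) */
    };

    /* Which of the three pre-serialize cases applies? */
    static int which_case(const struct fspace_sk *fs)
    {
        if (!fs->sect_addr_defined && fs->sinfo_in_memory)
            return 1;  /* floating: allocate real space, insert, fix fields */
        if (fs->sect_addr_defined && fs->sect_addr_is_tmp)
            return 2;  /* cached at a temporary address: allocate and move  */
        if (fs->sect_addr_defined)
            return 3;  /* cached at a real address: sanity checks only      */
        return 0;      /* no persistent section info: nothing to do         */
    }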
@@ -535,7 +535,7 @@ H5FS__cache_hdr_pre_serialize(H5F_t *f, void *_thing,
HDassert(fspace->sinfo->cache_info.size == fspace->alloc_sect_size);
- /* the metadata cache is now managing the section info,
+ /* the metadata cache is now managing the section info,
* so set fspace->sinfo to NULL.
*/
fspace->sinfo = NULL;
@@ -584,11 +584,11 @@ H5FS__cache_hdr_pre_serialize(H5F_t *f, void *_thing,
} /* end else */
} /* end if */
else if(H5F_addr_defined(fspace->sect_addr)) {
- /* Here the metadata cache is managing the section info.
+ /* Here the metadata cache is managing the section info.
*
- * Do some sanity checks, and then test to see if the section
- * info is in real file space. If it isn't relocate it into
- * real file space lest the header be written to file with
+ * Do some sanity checks, and then test to see if the section
+ * info is in real file space. If it isn't, relocate it into
+ * real file space lest the header be written to file with
* a nonsense section info address.
*/
if(!H5F_POINT_OF_NO_RETURN(f)) {
@@ -601,12 +601,12 @@ H5FS__cache_hdr_pre_serialize(H5F_t *f, void *_thing,
unsigned sect_status = 0;
haddr_t new_sect_addr;
- /* we have work to do -- must relocate section info into
+ /* we have work to do -- must relocate section info into
* real file space.
*
* Since the section info address is in temporary space (AKA
- * imaginary space), it follows that the entry must be in
- * cache. Further, since fspace->sinfo is NULL, it must be
+ * imaginary space), it follows that the entry must be in
+ * cache. Further, since fspace->sinfo is NULL, it must be
* unprotected and un-pinned. Start by verifying this.
*/
if(H5AC_get_entry_status(f, fspace->sect_addr, &sect_status) < 0)
@@ -632,7 +632,7 @@ H5FS__cache_hdr_pre_serialize(H5F_t *f, void *_thing,
/* Update the internal address for the section info */
fspace->sect_addr = new_sect_addr;
- /* No need to mark the header dirty, as we are about to
+ /* No need to mark the header dirty, as we are about to
* serialize it.
*/
} /* end if */
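The temporary-to-real relocation in case 2 follows a fixed pattern: allocate real file space, ask the cache to move the entry, then record the new address in the header. A sketch under hypothetical stand-ins (the real H5MF_alloc()/H5AC_move_entry() take the file pointer, an allocation type, and more arguments):

    #define SK_ADDR_UNDEF ((unsigned long)-1)

    /* Hypothetical stubs so the sketch compiles on its own */
    static unsigned long sk_alloc_real_space(unsigned long size)
    { (void)size; return 4096; /* pretend: new space at the EOA */ }

    static int sk_move_cache_entry(unsigned long old_a, unsigned long new_a)
    { (void)old_a; (void)new_a; return 0; /* pretend: cache moved it */ }

    /* Case 2: give the cached section info a home in real file space */
    static int sk_relocate_sinfo(unsigned long *sect_addr, unsigned long size)
    {
        unsigned long new_addr = sk_alloc_real_space(size);

        if (new_addr == SK_ADDR_UNDEF)
            return -1;                 /* allocation failed               */
        if (sk_move_cache_entry(*sect_addr, new_addr) < 0)
            return -1;                 /* cache refused to move the entry */
        *sect_addr = new_addr;         /* header now records the real
                                        * address; no need to mark it
                                        * dirty -- it is being serialized
                                        * right now                       */
        return 0;
    }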
@@ -660,7 +660,7 @@ done:
*
* Purpose: Given an instance of H5FS_t and a suitably sized buffer,
* serialize the contents of the instance of H5FS_t and write
- * its contents to the buffer. This buffer will be used to
+ * its contents to the buffer. This buffer will be used to
* write the image of the instance to file.
*
* Return: Success: SUCCEED
@@ -690,20 +690,20 @@ H5FS__cache_hdr_serialize(const H5F_t *f, void *_image, size_t len,
HDassert(fspace->cache_info.type == H5AC_FSPACE_HDR);
HDassert(fspace->hdr_size == len);
- /* The section information does not always exits, and if it does,
- * it is not always in the cache. To make matters more interesting,
+ /* The section information does not always exist, and if it does,
+ * it is not always in the cache. To make matters more interesting,
* even if it is in the cache, it may not be in real file space.
*
- * The pre-serialize function should have moved the section info
+ * The pre-serialize function should have moved the section info
* into real file space if necessary before this function was called.
* The following asserts are a cursory check on this.
*/
HDassert((! H5F_addr_defined(fspace->sect_addr)) || (! H5F_IS_TMP_ADDR(f, fspace->sect_addr)));
if(!H5F_POINT_OF_NO_RETURN(f))
- HDassert((! H5F_addr_defined(fspace->sect_addr)) ||
- ((fspace->serial_sect_count > 0) &&
- (fspace->sect_size > 0) &&
+ HDassert((! H5F_addr_defined(fspace->sect_addr)) ||
+ ((fspace->serial_sect_count > 0) &&
+ (fspace->sect_size > 0) &&
(fspace->alloc_sect_size == (size_t)fspace->sect_size)));
/* Magic number */
@@ -737,8 +737,8 @@ H5FS__cache_hdr_serialize(const H5F_t *f, void *_image, size_t len,
/* Expand percent */
UINT16ENCODE(image, fspace->expand_percent);
- /* Size of address space free space sections are within (log2 of
- * actual value)
+ /* Size of address space free space sections are within (log2 of
+ * actual value)
*/
UINT16ENCODE(image, fspace->max_sect_addr);
@@ -787,7 +787,7 @@ H5FS__cache_hdr_notify(H5AC_notify_action_t action, void *_thing)
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
-
+
/* Sanity check */
HDassert(fspace);
@@ -872,7 +872,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5FS__cache_sinfo_get_initial_load_size()
*
- * Purpose: Compute the size of the on disk image of the free space
+ * Purpose: Compute the size of the on disk image of the free space
* manager section info, and place this value in *image_len.
*
* Return: Success: SUCCEED
@@ -883,7 +883,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5FS__cache_sinfo_get_initial_load_size(void *_udata, size_t *image_len)
{
const H5FS_t *fspace; /* free space manager */
@@ -1136,9 +1136,9 @@ H5FS__cache_sinfo_image_len(const void *_thing, size_t *image_len)
/*-------------------------------------------------------------------------
* Function: H5FS__cache_sinfo_pre_serialize
*
- * Purpose: The objective of this function is to test to see if file space
- * for the section info is located in temporary (AKA imaginary) file
- * space. If it is, relocate file space for the section info to
+ * Purpose: The objective of this function is to test to see if file space
+ * for the section info is located in temporary (AKA imaginary) file
+ * space. If it is, relocate file space for the section info to
* regular file space.
*
* Return: Success: SUCCEED
@@ -1149,7 +1149,7 @@ H5FS__cache_sinfo_image_len(const void *_thing, size_t *image_len)
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5FS__cache_sinfo_pre_serialize(H5F_t *f, void *_thing, haddr_t addr,
size_t len, haddr_t *new_addr, size_t *new_len, unsigned *flags)
{
@@ -1225,8 +1225,8 @@ done:
* Function: H5FS__cache_sinfo_serialize
*
* Purpose: Given an instance of H5FS_sinfo_t and a suitably sized buffer,
- * serialize the contents of the instance of H5FS_sinfo_t and write
- * its contents to the buffer. This buffer will be used to write
+ * serialize the contents of the instance of H5FS_sinfo_t and write
+ * its contents to the buffer. This buffer will be used to write
* the image of the instance to file.
*
* Return: Success: SUCCEED
@@ -1237,7 +1237,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5FS__cache_sinfo_serialize(const H5F_t *f, void *_image, size_t len,
void *_thing)
{
@@ -1322,7 +1322,7 @@ H5FS__cache_sinfo_notify(H5AC_notify_action_t action, void *_thing)
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
-
+
/* Sanity check */
HDassert(sinfo);
@@ -1370,7 +1370,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5FS__cache_sinfo_free_icr
*
- * Purpose: Free the memory used for the in core representation of the
+ * Purpose: Free the memory used for the in core representation of the
* free space manager section info.
*
* Note: The metadata cache sets the object's cache_info.magic to
@@ -1385,7 +1385,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5FS__cache_sinfo_free_icr(void *_thing)
{
H5FS_sinfo_t *sinfo = (H5FS_sinfo_t *)_thing; /* Pointer to the object */
diff --git a/src/H5FSint.c b/src/H5FSint.c
index 264950d..6ba5748 100644
--- a/src/H5FSint.c
+++ b/src/H5FSint.c
@@ -121,7 +121,7 @@ H5FS__create_flush_depend(H5AC_info_t *parent_entry, H5AC_info_t *child_entry)
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
-
+
/* Sanity check */
HDassert(parent_entry);
HDassert(child_entry);
@@ -153,7 +153,7 @@ H5FS__destroy_flush_depend(H5AC_info_t *parent_entry, H5AC_info_t *child_entry)
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
-
+
/* Sanity check */
HDassert(parent_entry);
HDassert(child_entry);
@@ -161,7 +161,7 @@ H5FS__destroy_flush_depend(H5AC_info_t *parent_entry, H5AC_info_t *child_entry)
/* Destroy a flush dependency between parent and child entry */
if(H5AC_destroy_flush_dependency(parent_entry, child_entry) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
-
+
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5FS__destroy_flush_depend() */
diff --git a/src/H5FSsection.c b/src/H5FSsection.c
index a58347f..bd35cbd 100644
--- a/src/H5FSsection.c
+++ b/src/H5FSsection.c
@@ -1212,7 +1212,7 @@ H5FS_sect_merge(H5FS_t *fspace, H5FS_section_info_t **sect, void *op_data)
/* Retarget section pointer to 'less than' node that was merged into */
*sect = tmp_sect;
- if(*sect == NULL)
+ if(*sect == NULL)
HGOTO_DONE(ret_value);
/* Indicate successful merge occurred */
@@ -1257,7 +1257,7 @@ H5FS_sect_merge(H5FS_t *fspace, H5FS_section_info_t **sect, void *op_data)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTINSERT, FAIL, "can't merge two sections")
/* It's possible that the merge caused the section to be deleted (particularly in the paged allocation case) */
- if(*sect == NULL)
+ if(*sect == NULL)
HGOTO_DONE(ret_value);
/* Indicate successful merge occurred */
@@ -2462,9 +2462,9 @@ done:
* also re-inserts the header and section info in the metadata
* cache with this allocation.
*
- * When paged allocation is not enabled, allocation of space
- * for the free space manager header and section info is
- * straight forward -- we simply allocate the space directly
+ * When paged allocation is not enabled, allocation of space
+ * for the free space manager header and section info is
+ * straightforward -- we simply allocate the space directly
* from file driver.
*
* Note that if f->shared->alignment > 1, and EOA is not a
@@ -2475,23 +2475,23 @@ done:
* it will usually be reclaimed later.
*
* When page allocation is enabled, things are more difficult,
- * as there is the possibility that page buffering will be
+ * as there is the possibility that page buffering will be
* enabled when the free space managers are read. To allow
- * for this, we must ensure that space allocated for the
+ * for this, we must ensure that space allocated for the
* free space manager header and section info is either larger
* than a page, or resides completely within a page.
*
- * Do this by allocating space for the free space header and
- * section info starting at page boundaries, and extending
+ * Do this by allocating space for the free space header and
+ * section info starting at page boundaries, and extending
* allocation to the next page boundary. This of course wastes
* space, but see below.
*
- * On the first free space allocation / deallocation after the
- * next file open, we will read the self referential free space
- * managers, float them and reduce the EOA to its value prior
- * to allocation of file space for the self referential free
- * space managers on the preceeding file close. This EOA value
- * is stored in the free space manager superblock extension
+ * On the first free space allocation / deallocation after the
+ * next file open, we will read the self referential free space
+ * managers, float them and reduce the EOA to its value prior
+ * to allocation of file space for the self referential free
+ * space managers on the preceding file close. This EOA value
+ * is stored in the free space manager superblock extension
* message.
*
* Return: Success: non-negative
@@ -2536,7 +2536,7 @@ H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, H5FS_t *fspace,
HDassert(f->shared->fs_persist);
/* At present, all free space strategies enable the free space managers.
- * This will probably change -- at which point this assertion should
+ * This will probably change -- at which point this assertion should
* be revisited.
*/
/* Updated: Only the following two strategies enable the free-space managers */
@@ -2559,7 +2559,7 @@ H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, H5FS_t *fspace,
hdr_alloc_size = H5FS_HEADER_SIZE(f);
- /* if page allocation is enabled, extend the hdr_alloc_size to the
+ /* if page allocation is enabled, extend the hdr_alloc_size to the
* next page boundary.
*/
if(H5F_PAGED_AGGR(f)) {
@@ -2599,7 +2599,7 @@ H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, H5FS_t *fspace,
sinfo_alloc_size = fspace->sect_size;
- /* if paged allocation is enabled, extend the sinfo_alloc_size to the
+ /* if paged allocation is enabled, extend the sinfo_alloc_size to the
* next page boundary.
*/
if(H5F_PAGED_AGGR(f)) {
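Extending hdr_alloc_size / sinfo_alloc_size to the next page boundary is plain round-up arithmetic. A runnable sketch (round_to_page is a hypothetical helper, not an HDF5 routine):

    #include <assert.h>
    #include <stdio.h>

    /* Round an allocation up to the next page boundary (page_size > 0) */
    static unsigned long round_to_page(unsigned long size,
                                       unsigned long page_size)
    {
        return ((size + page_size - 1) / page_size) * page_size;
    }

    int main(void)
    {
        assert(round_to_page(1, 4096)    == 4096);
        assert(round_to_page(4096, 4096) == 4096);  /* already aligned */
        assert(round_to_page(4097, 4096) == 8192);  /* spills to next page */
        printf("ok\n");
        return 0;
    }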
diff --git a/src/H5Fdeprec.c b/src/H5Fdeprec.c
index 703c9cc..6464e62 100644
--- a/src/H5Fdeprec.c
+++ b/src/H5Fdeprec.c
@@ -153,10 +153,10 @@ done:
* bounds.
*
* Before release 1.10.2, the library supports only two
- * combinations of low/high bounds:
+ * combinations of low/high bounds:
* (earliest, latest)
* (latest, latest)
- * Thus, this public routine does the job in switching
+ * Thus, this public routine does the job in switching
* between the two combinations listed above.
*
* Starting release 1.10.2, we add v18 to the enumerated
@@ -174,7 +174,7 @@ done:
* Return: Non-negative on success/Negative on failure
*
* Programmer: Vailin Choi; December 2017
- *
+ *
*-------------------------------------------------------------------------
*/
herr_t
diff --git a/src/H5Fefc.c b/src/H5Fefc.c
index 14aa1c5..6c9a0b0 100644
--- a/src/H5Fefc.c
+++ b/src/H5Fefc.c
@@ -867,7 +867,7 @@ H5F__efc_try_close(H5F_t *f)
HGOTO_DONE(SUCCEED)
/* If the file EFC were locked, that should always mean that there exists
- * a reference to this file that is not in an EFC (it may have just been
+ * a reference to this file that is not in an EFC (it may have just been
* removed from an EFC), and should have been caught by the above check */
/* If we get here then we must be beginning a new run. Make sure that the
* temporary variables in f->shared->efc are at the default value */
diff --git a/src/H5Fint.c b/src/H5Fint.c
index 671a8e4..abc2840 100644
--- a/src/H5Fint.c
+++ b/src/H5Fint.c
@@ -2956,7 +2956,7 @@ H5F_set_retries(H5F_t *f)
/* Initialize the # of bins for retries */
f->shared->retries_nbins = 0;
if(f->shared->read_attempts > 1) {
- /* Use HDceil to ensure that the log10 value is rounded up to the
+ /* Use HDceil to ensure that the log10 value is rounded up to the
nearest integer before casting to unsigned */
tmp = HDceil(HDlog10((double)f->shared->read_attempts));
f->shared->retries_nbins = (unsigned)tmp;
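Worked example: with read_attempts = 100, log10 gives exactly 2, so 2 bins; with 101 the ceiling pushes it to 3. A runnable check of the computation, with plain libm calls standing in for the HDceil/HDlog10 wrappers:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned attempts[] = { 2, 10, 100, 101, 1000 };
        int      i;

        for (i = 0; i < 5; i++) {
            double   tmp   = ceil(log10((double)attempts[i]));
            unsigned nbins = (unsigned)tmp;

            printf("read_attempts = %4u  ->  retries_nbins = %u\n",
                   attempts[i], nbins);
        }
        return 0;  /* prints 1, 1, 2, 3, 3 */
    }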
diff --git a/src/H5Fio.c b/src/H5Fio.c
index 830b7ee..2fe7b43 100644
--- a/src/H5Fio.c
+++ b/src/H5Fio.c
@@ -172,7 +172,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5F_flush_tagged_metadata
*
- * Purpose: Flushes metadata with specified tag in the metadata cache
+ * Purpose: Flushes metadata with specified tag in the metadata cache
* to disk.
*
* Return: Non-negative on success/Negative on failure
diff --git a/src/H5Fmpi.c b/src/H5Fmpi.c
index f854d3b..7759430 100644
--- a/src/H5Fmpi.c
+++ b/src/H5Fmpi.c
@@ -290,8 +290,8 @@ done:
/*-------------------------------------------------------------------------
* Function: H5F_mpi_retrieve_comm
*
- * Purpose: Retrieves an MPI communicator from the file the location ID
- * is in. If the loc_id is invalid, the fapl_id is used to
+ * Purpose: Retrieves an MPI communicator from the file the location ID
+ * is in. If the loc_id is invalid, the fapl_id is used to
* retrieve the communicator.
*
* Return: Success: Non-negative
diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h
index f9d3f53..e5a673f 100644
--- a/src/H5Fpkg.h
+++ b/src/H5Fpkg.h
@@ -166,8 +166,8 @@ typedef struct H5F_superblock_cache_ud_t {
unsigned btree_k[H5B_NUM_BTREE_ID]; /* B-tree key values for each type */
haddr_t stored_eof; /* End-of-file in file */
hbool_t drvrinfo_removed; /* Indicate if the driver info was removed */
- unsigned super_vers; /* Superblock version obtained in get_load_size callback.
- * It will be used later in verify_chksum callback
+ unsigned super_vers; /* Superblock version obtained in get_load_size callback.
+ * It will be used later in verify_chksum callback
*/
} H5F_superblock_cache_ud_t;
@@ -312,7 +312,7 @@ struct H5F_shared_t {
hsize_t fs_threshold; /* Free space section threshold */
hbool_t fs_persist; /* Free-space persist or not */
unsigned fs_version; /* Free-space version: */
- /* It is used to update fsinfo message in the superblock
+ /* It is used to update fsinfo message in the superblock
extension when closing down the free-space managers */
hbool_t use_tmp_space; /* Whether temp. file space allocation is allowed */
haddr_t tmp_addr; /* Next address to use for temp. space in the file */
diff --git a/src/H5Fsuper_cache.c b/src/H5Fsuper_cache.c
index db5bb7d..0cb838f 100644
--- a/src/H5Fsuper_cache.c
+++ b/src/H5Fsuper_cache.c
@@ -269,7 +269,7 @@ H5F__drvrinfo_prefix_decode(H5O_drvinfo_t *drvrinfo, char *drv_name,
UINT32DECODE(image, drvrinfo->len);
/* Driver name and/or version */
- if(drv_name) {
+ if(drv_name) {
H5MM_memcpy(drv_name, (const char *)image, (size_t)8);
drv_name[8] = '\0';
image += 8; /* advance past name/version */
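The decode copies a fixed 8-byte, non-NUL-terminated name field, so the destination must be one byte larger and terminated by hand. A runnable sketch of the same pattern over hypothetical image bytes:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* Hypothetical image: an 8-byte name field with no terminator */
        const unsigned char image[] = "drvname1<rest of driver info block>";
        char drv_name[9];                 /* 8 field bytes + 1 for the NUL */

        memcpy(drv_name, image, 8);       /* copy exactly the field width  */
        drv_name[8] = '\0';               /* terminate before string use   */
        printf("driver: %s\n", drv_name); /* -> "driver: drvname1"         */
        return 0;
    }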
@@ -370,7 +370,7 @@ H5F__cache_superblock_get_final_load_size(const void *_image, size_t image_len,
udata->super_vers = sblock.super_vers;
/* Set the final size for the cache image */
- *actual_len = H5F_SUPERBLOCK_FIXED_SIZE +
+ *actual_len = H5F_SUPERBLOCK_FIXED_SIZE +
(size_t)H5F_SUPERBLOCK_VARLEN_SIZE(sblock.super_vers, sblock.sizeof_addr, sblock.sizeof_size);
done:
@@ -381,7 +381,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5F__cache_superblock_verify_chksum
*
- * Purpose: Verify the computed checksum of the data structure is the
+ * Purpose: Verify the computed checksum of the data structure is the
* same as the stored chksum.
*
* Return: Success: TRUE/FALSE
@@ -585,14 +585,14 @@ H5F__cache_superblock_deserialize(const void *_image, size_t len, void *_udata,
H5F_addr_decode(udata->f, (const uint8_t **)&image, &udata->stored_eof/*out*/);
H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->root_addr/*out*/);
- /* checksum verification already done in verify_chksum cb */
+ /* checksum verification already done in verify_chksum cb */
/* Decode checksum */
UINT32DECODE(image, read_chksum);
/* The Driver Information Block may not appear with the version
- * 2 super block. Thus we set the driver_addr field of the in
- * core representation of the super block HADDR_UNDEF to prevent
+ * 2 super block. Thus we set the driver_addr field of the in
+ * core representation of the super block to HADDR_UNDEF to prevent
* any attempt to load the Driver Information Block.
*/
sblock->driver_addr = HADDR_UNDEF;
@@ -676,11 +676,11 @@ H5F__cache_superblock_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNU
HDassert(image);
HDassert(sblock);
- /* Assert that the superblock is marked as being flushed last (and
+ /* Assert that the superblock is marked as being flushed last (and
collectively in parallel) */
/* (We'll rely on the cache to make sure it actually *is* flushed
last (and collectively in parallel), but this check doesn't hurt) */
- HDassert(sblock->cache_info.flush_me_last);
+ HDassert(sblock->cache_info.flush_me_last);
/* Encode the common portion of the file superblock for all versions */
H5MM_memcpy(image, H5F_SIGNATURE, (size_t)H5F_SIGNATURE_LEN);
diff --git a/src/H5G.c b/src/H5G.c
index b831c99..288239b 100644
--- a/src/H5G.c
+++ b/src/H5G.c
@@ -689,7 +689,7 @@ H5Gflush(hid_t group_id)
FUNC_ENTER_API(FAIL)
H5TRACE1("e", "i", group_id);
-
+
/* Check args */
if(NULL == (grp = (H5G_t *)H5I_object_verify(group_id, H5I_GROUP)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a group")
@@ -724,7 +724,7 @@ H5Grefresh(hid_t group_id)
{
H5G_t *grp; /* Group for this operation */
herr_t ret_value = SUCCEED; /* Return value */
-
+
FUNC_ENTER_API(FAIL)
H5TRACE1("e", "i", group_id);
diff --git a/src/H5Gcache.c b/src/H5Gcache.c
index 7387eae..41534ac 100644
--- a/src/H5Gcache.c
+++ b/src/H5Gcache.c
@@ -113,7 +113,7 @@ H5FL_SEQ_EXTERN(H5G_entry_t);
/*-------------------------------------------------------------------------
* Function: H5G__cache_node_get_initial_load_size()
*
- * Purpose: Determine the size of the on-disk image of the node, and
+ * Purpose: Determine the size of the on-disk image of the node, and
* return this value in *image_len.
*
* Return: Success: SUCCEED
@@ -149,10 +149,10 @@ H5G__cache_node_get_initial_load_size(void *_udata, size_t *image_len)
 * node, allocate an instance of H5G_node_t, load the contents of the
* image into it, and return a pointer to the instance.
*
- * Note that deserializing the image requires access to the file
- * pointer, which is not included in the parameter list for this
- * callback. Finesse this issue by passing in the file pointer
- * twice to the H5AC_protect() call -- once as the file pointer
+ * Note that deserializing the image requires access to the file
+ * pointer, which is not included in the parameter list for this
+ * callback. Finesse this issue by passing in the file pointer
+ * twice to the H5AC_protect() call -- once as the file pointer
* proper, and again as the user data
*
* Return: Success: Pointer to in core representation
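The finesse the comment describes is generic: when a callback lacks a file-pointer parameter, pass the pointer through the opaque user-data channel as well. A sketch with a hypothetical callback signature (not the H5AC API):

    #include <stddef.h>

    /* Hypothetical callback shape: note there is no file-pointer argument */
    typedef void *(*sk_deserialize_fn)(const void *image, size_t len,
                                       void *udata);

    struct sk_file;  /* opaque stand-in for H5F_t */

    static void *sk_node_deserialize(const void *image, size_t len,
                                     void *udata)
    {
        struct sk_file *f = (struct sk_file *)udata; /* recover the file */

        (void)image; (void)len; (void)f;
        return NULL; /* ... decode the node using f here ... */
    }

    /* At the call site the file pointer rides along twice:
     *     protect(f, addr, sk_node_deserialize, f);   <- f is also udata */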
diff --git a/src/H5Gcompact.c b/src/H5Gcompact.c
index 6d66274..44d505d 100644
--- a/src/H5Gcompact.c
+++ b/src/H5Gcompact.c
@@ -60,7 +60,7 @@ typedef struct {
/* PRIVATE PROTOTYPES */
static herr_t H5G_compact_build_table_cb(const void *_mesg, unsigned idx, void *_udata);
-static herr_t H5G__compact_build_table(const H5O_loc_t *oloc,
+static herr_t H5G__compact_build_table(const H5O_loc_t *oloc,
const H5O_linfo_t *linfo, H5_index_t idx_type, H5_iter_order_t order,
H5G_link_table_t *ltable);
diff --git a/src/H5Gint.c b/src/H5Gint.c
index c54b341..af37849 100644
--- a/src/H5Gint.c
+++ b/src/H5Gint.c
@@ -527,9 +527,9 @@ H5G_close(H5G_t *grp)
/* Evict group metadata if evicting on close */
if(!file_closed && H5F_SHARED(grp->oloc.file) && H5F_EVICT_ON_CLOSE(grp->oloc.file)) {
- if(H5AC_flush_tagged_metadata(grp->oloc.file, grp->oloc.addr) < 0)
+ if(H5AC_flush_tagged_metadata(grp->oloc.file, grp->oloc.addr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush tagged metadata")
- if(H5AC_evict_tagged_metadata(grp->oloc.file, grp->oloc.addr, FALSE) < 0)
+ if(H5AC_evict_tagged_metadata(grp->oloc.file, grp->oloc.addr, FALSE) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to evict tagged metadata")
} /* end if */
diff --git a/src/H5Gnode.c b/src/H5Gnode.c
index b79b7d2..1566057 100644
--- a/src/H5Gnode.c
+++ b/src/H5Gnode.c
@@ -538,7 +538,7 @@ H5G_node_found(H5F_t *f, haddr_t addr, const void H5_ATTR_UNUSED *_lt_key,
rt = sn->nsyms;
while(lt < rt && cmp) {
idx = (lt + rt) / 2;
-
+
if((s = (const char *)H5HL_offset_into(udata->common.heap, sn->entry[idx].name_off)) == NULL)
HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "unable to get symbol table name")
cmp = HDstrcmp(udata->common.name, s);
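The hunk shows the midpoint step of the symbol-table lookup; the full pattern is an ordinary lower/upper-bound binary search keyed on strcmp(). A self-contained sketch over a hypothetical sorted name table (not the H5G node layout):

    #include <stdio.h>
    #include <string.h>

    /* Binary search over a sorted name table; returns index or -1 */
    static int find_name(const char *const names[], int nsyms,
                         const char *want)
    {
        int lt = 0, rt = nsyms, idx = -1, cmp = 1;

        while (lt < rt && cmp) {
            idx = (lt + rt) / 2;
            cmp = strcmp(want, names[idx]);
            if (cmp < 0)
                rt = idx;         /* search the left half  */
            else if (cmp > 0)
                lt = idx + 1;     /* search the right half */
        }
        return cmp ? -1 : idx;
    }

    int main(void)
    {
        const char *names[] = { "alpha", "delta", "gamma", "omega" };

        printf("%d %d\n", find_name(names, 4, "gamma"),
               find_name(names, 4, "beta"));  /* -> "2 -1" */
        return 0;
    }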
@@ -1451,7 +1451,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5G__node_iterate_size(H5F_t *f, const void H5_ATTR_UNUSED *_lt_key,
+H5G__node_iterate_size(H5F_t *f, const void H5_ATTR_UNUSED *_lt_key,
haddr_t H5_ATTR_UNUSED addr, const void H5_ATTR_UNUSED *_rt_key, void *_udata)
{
hsize_t *stab_size = (hsize_t *)_udata; /* User data */
diff --git a/src/H5Gpkg.h b/src/H5Gpkg.h
index a6b03eb..e8c3cc7 100644
--- a/src/H5Gpkg.h
+++ b/src/H5Gpkg.h
@@ -373,7 +373,7 @@ H5_DLL ssize_t H5G__stab_get_name_by_idx(const H5O_loc_t *oloc, H5_iter_order_t
hsize_t n, char* name, size_t size);
H5_DLL herr_t H5G__stab_remove(const H5O_loc_t *oloc, H5RS_str_t *grp_full_path_r,
const char *name);
-H5_DLL herr_t H5G__stab_remove_by_idx(const H5O_loc_t *oloc,
+H5_DLL herr_t H5G__stab_remove_by_idx(const H5O_loc_t *oloc,
H5RS_str_t *grp_full_path_r, H5_iter_order_t order, hsize_t n);
H5_DLL herr_t H5G__stab_lookup(const H5O_loc_t *grp_oloc, const char *name,
H5O_link_t *lnk);
@@ -450,7 +450,7 @@ H5_DLL herr_t H5G__compact_lookup_by_idx(const H5O_loc_t *oloc,
const H5O_linfo_t *linfo, H5_index_t idx_type, H5_iter_order_t order,
hsize_t n, H5O_link_t *lnk);
#ifndef H5_NO_DEPRECATED_SYMBOLS
-H5_DLL H5G_obj_t H5G__compact_get_type_by_idx(H5O_loc_t *oloc,
+H5_DLL H5G_obj_t H5G__compact_get_type_by_idx(H5O_loc_t *oloc,
const H5O_linfo_t *linfo, hsize_t idx);
#endif /* H5_NO_DEPRECATED_SYMBOLS */
diff --git a/src/H5Gprivate.h b/src/H5Gprivate.h
index 95dfef5..b783491 100644
--- a/src/H5Gprivate.h
+++ b/src/H5Gprivate.h
@@ -215,7 +215,7 @@ H5_DLL herr_t H5G_iterate(hid_t loc_id, const char *group_name,
H5_DLL herr_t H5G_visit(hid_t loc_id, const char *group_name,
H5_index_t idx_type, H5_iter_order_t order, H5L_iterate_t op, void *op_data);
-/*
+/*
* Functions that understand links in groups
*/
H5_DLL herr_t H5G_link_to_info(const struct H5O_link_t *lnk, H5L_info_t *linfo);
diff --git a/src/H5Gtraverse.c b/src/H5Gtraverse.c
index c2bf607..8e6c7f9 100644
--- a/src/H5Gtraverse.c
+++ b/src/H5Gtraverse.c
@@ -840,9 +840,9 @@ H5G_traverse(const H5G_loc_t *loc, const char *name, unsigned target, H5G_traver
/* Set up invalid tag. This is a precautionary step only. Setting an invalid
tag here will ensure that no metadata accessed while doing the traversal
- is given an improper tag, unless another one is specifically set up
- first. This will ensure we're not accidentally tagging something we
- shouldn't be during the traversal. Note that for best tagging assertion
+ is given an improper tag, unless another one is specifically set up
+ first. This will ensure we're not accidentally tagging something we
+ shouldn't be during the traversal. Note that for best tagging assertion
coverage, setting H5C_DO_TAGGING_SANITY_CHECKS is advised. */
H5_BEGIN_TAG(H5AC__INVALID_TAG);
diff --git a/src/H5HFbtree2.c b/src/H5HFbtree2.c
index 5807c56..479c2bd 100644
--- a/src/H5HFbtree2.c
+++ b/src/H5HFbtree2.c
@@ -360,7 +360,7 @@ H5HF__huge_bt2_indir_compare(const void *_rec1, const void *_rec2, int *result)
{
FUNC_ENTER_STATIC_NOERR
- *result = (int)(((const H5HF_huge_bt2_indir_rec_t *)_rec1)->id -
+ *result = (int)(((const H5HF_huge_bt2_indir_rec_t *)_rec1)->id -
((const H5HF_huge_bt2_indir_rec_t *)_rec2)->id);
FUNC_LEAVE_NOAPI(SUCCEED)
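Subtractive comparisons like the one above (and the filtered variant below) are compact, but if the id fields are wide or unsigned the difference can truncate when cast to int and misorder records; whether that can happen here depends on the actual record types. A stand-alone sketch of the overflow-safe three-way form, with a stand-in id type:

    #include <stdint.h>
    #include <stdio.h>

    /* Three-way compare that cannot overflow or truncate */
    static int cmp_ids(uint64_t a, uint64_t b)
    {
        return (a > b) - (a < b);
    }

    int main(void)
    {
        /* On common platforms the subtractive form reports this unequal
         * pair as equal once the difference is truncated to int */
        uint64_t a = 0, b = UINT64_C(0x100000000);

        printf("subtract: %d   three-way: %d\n", (int)(a - b), cmp_ids(a, b));
        return 0;  /* -> "subtract: 0   three-way: -1" */
    }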
@@ -563,7 +563,7 @@ H5HF__huge_bt2_filt_indir_compare(const void *_rec1, const void *_rec2, int *res
{
FUNC_ENTER_STATIC_NOERR
- *result = (int)(((const H5HF_huge_bt2_filt_indir_rec_t *)_rec1)->id -
+ *result = (int)(((const H5HF_huge_bt2_filt_indir_rec_t *)_rec1)->id -
((const H5HF_huge_bt2_filt_indir_rec_t *)_rec2)->id);
FUNC_LEAVE_NOAPI(SUCCEED)
diff --git a/src/H5HFcache.c b/src/H5HFcache.c
index 2d1c1f2..59c9df4 100644
--- a/src/H5HFcache.c
+++ b/src/H5HFcache.c
@@ -77,35 +77,35 @@ static herr_t H5HF__cache_hdr_get_final_load_size(const void *image_ptr,
size_t image_len, void *udata, size_t *actual_len);
static htri_t H5HF__cache_hdr_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5HF__cache_hdr_deserialize(const void *image, size_t len,
- void *udata, hbool_t *dirty);
+ void *udata, hbool_t *dirty);
static herr_t H5HF__cache_hdr_image_len(const void *thing, size_t *image_len);
static herr_t H5HF__cache_hdr_pre_serialize(H5F_t *f, void *thing, haddr_t addr,
- size_t len, haddr_t *new_addr, size_t *new_len, unsigned *flags);
+ size_t len, haddr_t *new_addr, size_t *new_len, unsigned *flags);
static herr_t H5HF__cache_hdr_serialize(const H5F_t *f, void *image,
- size_t len, void *thing);
+ size_t len, void *thing);
static herr_t H5HF__cache_hdr_free_icr(void *thing);
static herr_t H5HF__cache_iblock_get_initial_load_size(void *udata, size_t *image_len);
static htri_t H5HF__cache_iblock_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5HF__cache_iblock_deserialize(const void *image, size_t len,
- void *udata, hbool_t *dirty);
+ void *udata, hbool_t *dirty);
static herr_t H5HF__cache_iblock_image_len(const void *thing, size_t *image_len);
static herr_t H5HF__cache_iblock_pre_serialize(H5F_t *f, void *thing,
- haddr_t addr, size_t len, haddr_t *new_addr, size_t *new_len, unsigned *flags);
+ haddr_t addr, size_t len, haddr_t *new_addr, size_t *new_len, unsigned *flags);
static herr_t H5HF__cache_iblock_serialize(const H5F_t *f, void *image,
- size_t len, void *thing);
-static herr_t H5HF__cache_iblock_notify(H5AC_notify_action_t action, void *thing);
+ size_t len, void *thing);
+static herr_t H5HF__cache_iblock_notify(H5AC_notify_action_t action, void *thing);
static herr_t H5HF__cache_iblock_free_icr(void *thing);
static herr_t H5HF__cache_dblock_get_initial_load_size(void *udata, size_t *image_len);
static htri_t H5HF__cache_dblock_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5HF__cache_dblock_deserialize(const void *image, size_t len,
- void *udata, hbool_t *dirty);
+ void *udata, hbool_t *dirty);
static herr_t H5HF__cache_dblock_image_len(const void *thing, size_t *image_len);
static herr_t H5HF__cache_dblock_pre_serialize(H5F_t *f, void *thing, haddr_t addr,
- size_t len, haddr_t *new_addr, size_t *new_len, unsigned *flags);
+ size_t len, haddr_t *new_addr, size_t *new_len, unsigned *flags);
static herr_t H5HF__cache_dblock_serialize(const H5F_t *f, void *image,
- size_t len, void *thing);
+ size_t len, void *thing);
static herr_t H5HF__cache_dblock_notify(H5AC_notify_action_t action, void *thing);
static herr_t H5HF__cache_dblock_free_icr(void *thing);
static herr_t H5HF__cache_dblock_fsf_size(const void *_thing, hsize_t *fsf_size);
@@ -114,14 +114,14 @@ static herr_t H5HF__cache_dblock_fsf_size(const void *_thing, hsize_t *fsf_size)
#ifndef NDEBUG
static herr_t H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
hbool_t *fd_clean, hbool_t *clean);
-static herr_t H5HF__cache_verify_iblock_descendants_clean(H5F_t *f,
+static herr_t H5HF__cache_verify_iblock_descendants_clean(H5F_t *f,
haddr_t fd_parent_addr, H5HF_indirect_t *iblock, unsigned *iblock_status,
hbool_t *fd_clean, hbool_t *clean);
-static herr_t H5HF__cache_verify_iblocks_dblocks_clean(H5F_t *f,
- haddr_t fd_parent_addr, H5HF_indirect_t *iblock, hbool_t *fd_clean,
+static herr_t H5HF__cache_verify_iblocks_dblocks_clean(H5F_t *f,
+ haddr_t fd_parent_addr, H5HF_indirect_t *iblock, hbool_t *fd_clean,
hbool_t *clean, hbool_t *has_dblocks);
-static herr_t H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f,
- haddr_t fd_parent_addr, H5HF_indirect_t *iblock,
+static herr_t H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f,
+ haddr_t fd_parent_addr, H5HF_indirect_t *iblock,
hbool_t *fd_clean, hbool_t *clean, hbool_t *has_iblocks);
#endif /* NDEBUG */
@@ -212,7 +212,7 @@ H5FL_BLK_DEFINE(direct_block);
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5HF__hdr_prefix_decode(H5HF_hdr_t *hdr, const uint8_t **image_ref)
{
const uint8_t *image = *image_ref; /* Pointer into into supplied image */
@@ -351,8 +351,8 @@ H5HF__dtable_encode(H5F_t *f, uint8_t **pp, const H5HF_dtable_t *dtable)
* Purpose: Determine the size of the fractal heap header on disk,
* and set *image_len to this value.
*
- * Note also that the value returned by this function presumes that
- * there is no I/O filtering data in the header. If there is, the
+ * Note also that the value returned by this function presumes that
+ * there is no I/O filtering data in the header. If there is, the
* size reported will be too small, and H5C_load_entry()
* will have to make two tries to load the fractal heap header.
*
@@ -364,7 +364,7 @@ H5HF__dtable_encode(H5F_t *f, uint8_t **pp, const H5HF_dtable_t *dtable)
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5HF__cache_hdr_get_initial_load_size(void *_udata, size_t *image_len)
{
H5HF_hdr_cache_ud_t *udata = (H5HF_hdr_cache_ud_t *)_udata; /* Pointer to user data */
@@ -402,7 +402,7 @@ H5HF__cache_hdr_get_initial_load_size(void *_udata, size_t *image_len)
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5HF__cache_hdr_get_final_load_size(const void *_image, size_t image_len,
void *_udata, size_t *actual_len)
{
@@ -609,11 +609,11 @@ done:
/*-------------------------------------------------------------------------
* Function: H5HF__cache_hdr_image_len
*
- * Purpose: Return the actual size of the fractal heap header on
- * disk image.
+ * Purpose: Return the actual size of the fractal heap header on
+ * disk image.
*
- * If the header contains filter information, this size will be
- * larger than the value returned by H5HF__cache_hdr_get_initial_load_size().
+ * If the header contains filter information, this size will be
+ * larger than the value returned by H5HF__cache_hdr_get_initial_load_size().
*
* Return: Success: SUCCEED
* Failure: FAIL
@@ -623,7 +623,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5HF__cache_hdr_image_len(const void *_thing, size_t *image_len)
{
const H5HF_hdr_t *hdr = (const H5HF_hdr_t *)_thing; /* Fractal heap info */
@@ -645,13 +645,13 @@ H5HF__cache_hdr_image_len(const void *_thing, size_t *image_len)
/*-------------------------------------------------------------------------
* Function: H5HF__cache_hdr_pre_serialize
*
- * Purpose: As best I can tell, fractal heap header blocks are always
+ * Purpose: As best I can tell, fractal heap header blocks are always
* allocated in real file space. Thus this routine simply verifies
* this, verifies that the len parameter contains the expected
* value, and returns an error if either of these checks fail.
*
* When compiled in debug mode, the function also verifies that all
- * indirect and direct blocks that are children of the header are
+ * indirect and direct blocks that are children of the header are
* either clean, or not in the metadata cache.
*
* Return: Success: SUCCEED
@@ -662,7 +662,7 @@ H5HF__cache_hdr_image_len(const void *_thing, size_t *image_len)
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5HF__cache_hdr_pre_serialize(H5F_t *f, void *_thing, haddr_t addr, size_t len,
haddr_t H5_ATTR_UNUSED *new_addr, size_t H5_ATTR_UNUSED *new_len,
unsigned *flags)
@@ -706,8 +706,8 @@ H5HF__cache_hdr_pre_serialize(H5F_t *f, void *_thing, haddr_t addr, size_t len,
* is made during a cache serialization instead of an entry or cache
* flush.
*
- * Note also that with the recent change in the definition of flush
- * dependency, not all descendants need be clean -- only direct flush
+ * Note also that with the recent change in the definition of flush
+ * dependency, not all descendants need be clean -- only direct flush
* dependency children.
*
* Finally, observe that the H5HF__cache_verify_hdr_descendants_clean()
@@ -736,7 +736,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5HF__cache_hdr_serialize
*
- * Purpose: Construct the on disk image of the header, and place it in
+ * Purpose: Construct the on disk image of the header, and place it in
* the buffer pointed to by image. Return SUCCEED on success,
* and FAIL on failure.
*
@@ -748,7 +748,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5HF__cache_hdr_serialize(const H5F_t *f, void *_image, size_t len,
void *_thing)
{
@@ -846,7 +846,7 @@ done:
*
* Purpose: Free the in core representation of the fractal heap header.
*
- * This routine frees just the header itself, not the
+ * This routine frees just the header itself, not the
* associated version 2 B-Tree, the associated Free Space Manager,
* nor the indirect/direct block tree that is rooted in the header.
*
@@ -865,7 +865,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5HF__cache_hdr_free_icr(void *_thing)
{
H5HF_hdr_t *hdr = (H5HF_hdr_t *)_thing; /* Fractal heap info */
@@ -890,7 +890,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5HF__cache_iblock_get_initial_load_size()
*
- * Purpose: Compute the size of the on disk image of the indirect
+ * Purpose: Compute the size of the on disk image of the indirect
* block, and place this value in *image_len.
*
* Return: Success: SUCCEED
@@ -901,7 +901,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5HF__cache_iblock_get_initial_load_size(void *_udata, size_t *image_len)
{
H5HF_iblock_cache_ud_t *udata = (H5HF_iblock_cache_ud_t *)_udata; /* User data for callback */
@@ -913,7 +913,7 @@ H5HF__cache_iblock_get_initial_load_size(void *_udata, size_t *image_len)
HDassert(udata->par_info);
HDassert(udata->par_info->hdr);
HDassert(image_len);
-
+
/* Set the image length size */
*image_len = (size_t)H5HF_MAN_INDIRECT_SIZE(udata->par_info->hdr, *udata->nrows);
@@ -961,9 +961,9 @@ H5HF__cache_iblock_verify_chksum(const void *_image, size_t len, void H5_ATTR_UN
/*-------------------------------------------------------------------------
* Function: H5HF__cache_iblock_deserialize
*
- * Purpose: Given a buffer containing the on disk image of the indirect
- * block, allocate an instance of H5HF_indirect_t, load the data
- * in the buffer into this new instance, and return a pointer to
+ * Purpose: Given a buffer containing the on disk image of the indirect
+ * block, allocate an instance of H5HF_indirect_t, load the data
+ * in the buffer into this new instance, and return a pointer to
* it.
*
* As best I can tell, the size of the indirect block image is fully
@@ -1164,7 +1164,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5HF__cache_iblock_image_len(const void *_thing, size_t *image_len)
{
const H5HF_indirect_t *iblock = (const H5HF_indirect_t *)_thing; /* Indirect block info */
@@ -1188,11 +1188,11 @@ H5HF__cache_iblock_image_len(const void *_thing, size_t *image_len)
*
* Purpose: The primary objective of this function is to determine if the
* indirect block is currently allocated in temporary file space,
- * and if so, to move it to real file space before the entry is
+ * and if so, to move it to real file space before the entry is
* serialized.
*
- * In debug compiles, this function also verifies that all
- * immediate flush dependency children of this indirect block
+ * In debug compiles, this function also verifies that all
+ * immediate flush dependency children of this indirect block
* are either clean or are not in cache.
*
* Return: Success: SUCCEED
@@ -1203,7 +1203,7 @@ H5HF__cache_iblock_image_len(const void *_thing, size_t *image_len)
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5HF__cache_iblock_pre_serialize(H5F_t *f, void *_thing, haddr_t addr,
size_t H5_ATTR_UNUSED len, haddr_t *new_addr, size_t H5_ATTR_UNUSED *new_len,
unsigned *flags)
@@ -1237,7 +1237,7 @@ H5HF__cache_iblock_pre_serialize(H5F_t *f, void *_thing, haddr_t addr,
unsigned iblock_status = 0;
/* verify that flush dependencies are working correctly. Do this
- * by verifying that all immediate flush dependency children of this
+ * by verifying that all immediate flush dependency children of this
* iblock are clean.
*/
if(H5AC_get_entry_status(f, iblock->addr, &iblock_status) < 0)
@@ -1254,7 +1254,7 @@ H5HF__cache_iblock_pre_serialize(H5F_t *f, void *_thing, haddr_t addr,
}
#endif /* NDEBUG */
- /* Check to see if we must re-allocate the iblock from temporary to
+ /* Check to see if we must re-allocate the iblock from temporary to
* normal (AKA real) file space.
*/
if(H5F_IS_TMP_ADDR(f, addr)) {
@@ -1302,7 +1302,7 @@ H5HF__cache_iblock_pre_serialize(H5F_t *f, void *_thing, haddr_t addr,
*new_addr = iblock_addr;
*flags = H5AC__SERIALIZE_MOVED_FLAG;
} /* end if */
- else
+ else
*flags = 0;
done:
@@ -1313,8 +1313,8 @@ done:
/*-------------------------------------------------------------------------
* Function: H5HF__cache_iblock_serialize
*
- * Purpose: Given a pointer to an iblock, and a pointer to a buffer of
- * the appropriate size, write the contents of the iblock to the
+ * Purpose: Given a pointer to an iblock, and a pointer to a buffer of
+ * the appropriate size, write the contents of the iblock to the
* buffer in format appropriate for writing to disk.
*
* Return: Success: SUCCEED
@@ -1325,7 +1325,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5HF__cache_iblock_serialize(const H5F_t *f, void *_image, size_t len,
void *_thing)
{
@@ -1390,7 +1390,7 @@ H5HF__cache_iblock_serialize(const H5F_t *f, void *_image, size_t len,
/* (either both the address & size are defined or both are
* not defined)
*/
- HDassert((H5F_addr_defined(iblock->ents[u].addr) && iblock->filt_ents[u].size)
+ HDassert((H5F_addr_defined(iblock->ents[u].addr) && iblock->filt_ents[u].size)
|| (!H5F_addr_defined(iblock->ents[u].addr) && iblock->filt_ents[u].size == 0));
/* Size of filtered direct block */
@@ -1431,11 +1431,11 @@ H5HF__cache_iblock_serialize(const H5F_t *f, void *_image, size_t len,
/*-------------------------------------------------------------------------
* Function: H5HF__cache_iblock_notify
*
- * Purpose: This function is used to create and destroy flush dependency
+ * Purpose: This function is used to create and destroy flush dependency
* relationships between iblocks and their parents as indirect blocks
* are loaded / inserted and evicted from the metadata cache.
*
- * In general, the parent will be another iblock, but it may be the
+ * In general, the parent will be another iblock, but it may be the
* header if the iblock in question is the root iblock.
*
* Return: Success: SUCCEED
@@ -1446,7 +1446,7 @@ H5HF__cache_iblock_serialize(const H5F_t *f, void *_image, size_t len,
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5HF__cache_iblock_notify(H5AC_notify_action_t action, void *_thing)
{
H5HF_indirect_t *iblock = (H5HF_indirect_t *)_thing; /* Indirect block info */
@@ -1528,7 +1528,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5HF__cache_iblock_free_icr
*
- * Purpose: Unlink the supplied instance of H5HF_indirect_t from the
+ * Purpose: Unlink the supplied instance of H5HF_indirect_t from the
* fractal heap and free its memory.
*
* Note: The metadata cache sets the object's cache_info.magic to
@@ -1543,7 +1543,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5HF__cache_iblock_free_icr(void *thing)
{
H5HF_indirect_t *iblock = (H5HF_indirect_t *)thing; /* Fractal heap indirect block to free */
@@ -1570,7 +1570,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5HF__cache_dblock_get_initial_load_size()
*
- * Purpose: Determine the size of the direct block on disk image, and
+ * Purpose: Determine the size of the direct block on disk image, and
* return it in *image_len.
*
* Return: Success: SUCCEED
@@ -1581,7 +1581,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5HF__cache_dblock_get_initial_load_size(void *_udata, size_t *image_len)
{
const H5HF_dblock_cache_ud_t *udata = (const H5HF_dblock_cache_ud_t *)_udata; /* User data for callback */
@@ -1612,7 +1612,7 @@ H5HF__cache_dblock_get_initial_load_size(void *_udata, size_t *image_len)
} /* end if */
else
*image_len = udata->dblock_size;
-
+
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5HF__cache_dblock_get_initial_load_size() */
@@ -1668,7 +1668,7 @@ H5HF__cache_dblock_verify_chksum(const void *_image, size_t len, void *_udata)
filter_cb.func = NULL; /* no callback function when failed */
/* Allocate buffer to perform I/O filtering on and copy image into
- * it. Must do this as H5Z_pipeline() may re-size the buffer
+ * it. Must do this as H5Z_pipeline() may re-size the buffer
* provided to it.
*/
if(NULL == (read_buf = H5MM_malloc(len)))
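The copy into read_buf exists because H5Z_pipeline() may reallocate the buffer handed to it, so the caller's read-only image must never be passed in directly. A runnable sketch of that defensive copy-then-filter pattern, with a toy pipeline standing in for H5Z_pipeline():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy stand-in for a pipeline that may grow/replace the buffer */
    static int run_pipeline(void **buf, size_t *len)
    {
        void *bigger = realloc(*buf, *len * 2); /* may move the allocation */

        if (!bigger)
            return -1;
        memset((char *)bigger + *len, 0, *len); /* pretend-decompress */
        *buf = bigger;
        *len *= 2;
        return 0;
    }

    int main(void)
    {
        const char image[] = "compressed-bytes"; /* caller-owned, read-only */
        size_t len = sizeof image;
        void *read_buf = malloc(len);            /* scratch copy first */

        if (!read_buf)
            return 1;
        memcpy(read_buf, image, len);
        if (run_pipeline(&read_buf, &len) < 0) {
            free(read_buf);
            return 1;
        }
        printf("filtered length: %zu\n", len);
        free(read_buf);
        return 0;
    }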
@@ -1801,7 +1801,7 @@ H5HF__cache_dblock_deserialize(const void *_image, size_t len, void *_udata,
/* Check for I/O filters on this heap */
if(hdr->filter_len > 0) {
/* Direct block is already decompressed in verify_chksum callback */
- if(udata->decompressed) {
+ if(udata->decompressed) {
/* Sanity check */
HDassert(udata->dblk);
@@ -1822,7 +1822,7 @@ H5HF__cache_dblock_deserialize(const void *_image, size_t len, void *_udata,
filter_cb.func = NULL; /* no callback function when failed */
/* Allocate buffer to perform I/O filtering on and copy image into
- * it. Must do this as H5Z_pipeline() may resize the buffer
+ * it. Must do this as H5Z_pipeline() may resize the buffer
* provided to it.
*/
if (NULL == (read_buf = H5MM_malloc(len)))
@@ -1926,7 +1926,7 @@ done:
* Function: H5HF__cache_dblock_image_len
*
* Purpose: Report the actual size of the direct block image on disk.
- * Note that this value will probably be incorrect if compression
+ * Note that this value will probably be incorrect if compression
* is enabled and the entry is dirty.
*
* Return: Success: SUCCEED
@@ -1937,7 +1937,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5HF__cache_dblock_image_len(const void *_thing, size_t *image_len)
{
const H5HF_direct_t *dblock = (const H5HF_direct_t *)_thing; /* Direct block info */
@@ -1958,32 +1958,32 @@ H5HF__cache_dblock_image_len(const void *_thing, size_t *image_len)
/* Check for I/O filters on this heap */
if(hdr->filter_len > 0) {
- /*
+ /*
* If the data is available, set to the compressed
- * size of the direct block -- otherwise set it equal to the
- * uncompressed size.
+ * size of the direct block -- otherwise set it equal to the
+ * uncompressed size.
*
* We have three possible scenarios here.
*
* First, the block may never have been flushed. In this
- * case, both dblock->file_size and the size stored in the
- * parent (either the header or the parent iblock) will all
- * be zero. In this case, return the uncompressed size
+ * case, both dblock->file_size and the size stored in the
+ * parent (either the header or the parent iblock) will all
+ * be zero. In this case, return the uncompressed size
* stored in dblock->size as the size.
*
* Second, the block may have just been serialized, in which
- * case, dblock->file_size should be zero, and the correct
+ * case, dblock->file_size should be zero, and the correct
* on disk size should be stored in the parent (again, either
* the header or the parent iblock as case may be).
- *
- * Third, we may be in the process of discarding this
+ *
+ * Third, we may be in the process of discarding this
* dblock without writing it. In this case, dblock->file_size
- * should be non-zero and have the correct size. Note that
+ * should be non-zero and have the correct size. Note that
* in this case, the direct block will have been detached,
* and thus looking up the parent will likely return incorrect
* data.
*/
- if(dblock->file_size != 0)
+ if(dblock->file_size != 0)
size = dblock->file_size;
else {
const H5HF_indirect_t *par_iblock = dblock->parent; /* Parent iblock */
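The three scenarios above collapse into a short priority chain: trust dblock->file_size when the block is being discarded, else the size recorded in the parent or header, else the uncompressed size. A sketch over a hypothetical condensed struct (field names illustrative, not the real layout):

    /* Hypothetical mirror of the fields the three scenarios consult */
    struct sk_dblock {
        unsigned long file_size;   /* non-zero only while being discarded  */
        unsigned long size;        /* uncompressed size                    */
        unsigned long parent_size; /* size recorded in parent/header, or 0 */
        int           filtered;    /* heap has I/O filters                 */
    };

    static unsigned long sk_dblock_image_len(const struct sk_dblock *d)
    {
        if (!d->filtered)
            return d->size;        /* no filters: size is exact            */
        if (d->file_size != 0)
            return d->file_size;   /* scenario 3: block is being discarded */
        if (d->parent_size != 0)
            return d->parent_size; /* scenario 2: block was just flushed   */
        return d->size;            /* scenario 1: block never flushed yet  */
    }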
@@ -2012,54 +2012,54 @@ H5HF__cache_dblock_image_len(const void *_thing, size_t *image_len)
* Function: H5HF__cache_dblock_pre_serialize
*
* Purpose: In principle, the purpose of this function is to determine
- * the size and location of the disk image of the target direct
+ * the size and location of the disk image of the target direct
* block. In this case, the uncompressed size of the block is
- * fixed, but since the direct block could be compressed,
+ * fixed, but since the direct block could be compressed,
* we may need to compute and report the compressed size.
*
- * This is a bit sticky in the case of a direct block when I/O
+ * This is a bit sticky in the case of a direct block when I/O
* filters are enabled, as the size of the compressed version
- * of the on disk image is not known until the direct block has
- * been run through the filters. Further, the location of the
- * on disk image may change if the compressed size of the image
+ * of the on disk image is not known until the direct block has
+ * been run through the filters. Further, the location of the
+ * on disk image may change if the compressed size of the image
* changes as well.
*
- * To complicate matters further, the direct block may have been
- * initially allocated in temporary (AKA imaginary) file space.
- * In this case, we must relocate the direct block's on-disk
- * image to "real" file space regardless of whether it has changed
+ * To complicate matters further, the direct block may have been
+ * initially allocated in temporary (AKA imaginary) file space.
+ * In this case, we must relocate the direct block's on-disk
+ * image to "real" file space regardless of whether it has changed
* size.
*
- * One simplifying factor is the direct block's "blk" field,
+ * One simplifying factor is the direct block's "blk" field,
* which contains a pointer to a buffer which (with the exception
- * of a small header) contains the on disk image in uncompressed
+ * of a small header) contains the on disk image in uncompressed
* form.
*
- * To square this particular circle, this function does
- * everything the serialize function usually does, with the
- * exception of copying the image into the image buffer provided
- * to the serialize function by the metadata cache. The data to
+ * To square this particular circle, this function does
+ * everything the serialize function usually does, with the
+ * exception of copying the image into the image buffer provided
+ * to the serialize function by the metadata cache. The data to
* copy is provided to the serialize function in a buffer pointed
* to by the write_buf field.
*
- * If I/O filters are enabled, on exit,
- * H5HF__cache_dblock_pre_serialize() sets the write_buf field to
+ * If I/O filters are enabled, on exit,
+ * H5HF__cache_dblock_pre_serialize() sets the write_buf field to
* point to a buffer containing the filtered image of the direct
* block. The serialize function should free this block, and set
- * the write_buf field to NULL after copying it into the image
+ * the write_buf field to NULL after copying it into the image
* buffer provided by the metadata cache.
*
- * If I/O filters are not enabled, this function prepares
- * the buffer pointed to by the blk field for copying to the
- * image buffer provided by the metadata cache, and sets the
- * write_buf field equal to the blk field. In this case, the
- * serialize function should simply set the write_buf field to
- * NULL after copying the direct block image into the image
+ * If I/O filters are not enabled, this function prepares
+ * the buffer pointed to by the blk field for copying to the
+ * image buffer provided by the metadata cache, and sets the
+ * write_buf field equal to the blk field. In this case, the
+ * serialize function should simply set the write_buf field to
+ * NULL after copying the direct block image into the image
* buffer.
*
- * In both of the above cases, the length of the buffer pointed
- * to by write_buf is provided in the write_len field. This
- * field must contain 0 on entry to this function, and should
+ * In both of the above cases, the length of the buffer pointed
+ * to by write_buf is provided in the write_len field. This
+ * field must contain 0 on entry to this function, and should
* be set back to 0 at the end of the serialize function.
*
* Return: Success: SUCCEED
@@ -2070,7 +2070,7 @@ H5HF__cache_dblock_image_len(const void *_thing, size_t *image_len)
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5HF__cache_dblock_pre_serialize(H5F_t *f, void *_thing,
haddr_t addr, size_t len, haddr_t *new_addr, size_t *new_len, unsigned *flags)
{
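The write_buf/write_len contract described above is symmetric: pre_serialize populates the pair, serialize copies the bytes out and resets both. A sketch of the serialize half under a hypothetical condensed struct (not the real H5HF_direct_t):

    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical view of the pre_serialize/serialize handshake */
    struct sk_dblock_io {
        void  *blk;       /* uncompressed image, owned by the direct block */
        void  *write_buf; /* set by pre_serialize, consumed by serialize   */
        size_t write_len; /* must be 0 outside a flush                     */
        int    filtered;  /* write_buf is a malloc'd filtered copy         */
    };

    /* serialize: copy out write_buf, then restore the rest-state contract */
    static void sk_serialize(struct sk_dblock_io *d, void *image, size_t len)
    {
        memcpy(image, d->write_buf,
               d->write_len < len ? d->write_len : len);
        if (d->filtered)
            free(d->write_buf); /* the filtered copy belonged to us        */
        d->write_buf = NULL;    /* back to NULL ...                        */
        d->write_len = 0;       /* ... and back to 0 for the next flush    */
    }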
@@ -2114,10 +2114,10 @@ H5HF__cache_dblock_pre_serialize(H5F_t *f, void *_thing,
HDassert(hdr->cache_info.type == H5AC_FHEAP_HDR);
if(dblock->parent) {
- /* this is the common case, in which the direct block is the child
+ /* this is the common case, in which the direct block is the child
* of an indirect block. Set up the convenience variables we will
- * need if the address and/or compressed size of the on disk image
- * of the direct block changes, and do some sanity checking in
+ * need if the address and/or compressed size of the on disk image
+ * of the direct block changes, and do some sanity checking in
* passing.
*/
par_iblock = dblock->parent;
@@ -2137,8 +2137,8 @@ H5HF__cache_dblock_pre_serialize(H5F_t *f, void *_thing,
at_tmp_addr = H5F_IS_TMP_ADDR(f, addr);
/* Begin by preping the direct block to be written to disk. Do
- * this by writing the correct magic number, the dblock version,
- * the address of the header, the offset of the block in the heap,
+ * this by writing the correct magic number, the dblock version,
+ * the address of the header, the offset of the block in the heap,
* and the checksum at the beginning of the block.
*/
@@ -2172,7 +2172,7 @@ H5HF__cache_dblock_pre_serialize(H5F_t *f, void *_thing,
UINT32ENCODE(image, metadata_chksum);
} /* end if */
- /* at this point, dblock->blk should point to an uncompressed image of
+ /* at this point, dblock->blk should point to an uncompressed image of
* the direct block. If I/O filters are not enabled, this image should
* be ready to hand off to the metadata cache.
*/
@@ -2211,10 +2211,10 @@ H5HF__cache_dblock_pre_serialize(H5F_t *f, void *_thing,
/* Use the compressed number of bytes as the size to write */
write_size = nbytes;
- /* If the size and/or location of the on disk image of the
+ /* If the size and/or location of the on disk image of the
* direct block changes, we must touch up its parent to reflect
* these changes. Do this differently depending on whether the
- * direct block's parent is an indirect block or (rarely) the
+ * direct block's parent is an indirect block or (rarely) the
* fractal heap header. In this case, the direct block is known
* as a root direct block.
*/
@@ -2233,7 +2233,7 @@ H5HF__cache_dblock_pre_serialize(H5F_t *f, void *_thing,
hdr_changed = TRUE;
} /* end if */
- /* verify that the cache's last record of the compressed
+ /* verify that the cache's last record of the compressed
* size matches the heap's last record. This value will
* likely change shortly.
*/
@@ -2241,10 +2241,10 @@ H5HF__cache_dblock_pre_serialize(H5F_t *f, void *_thing,
/* Check if we need to re-size the block on disk */
if(hdr->pline_root_direct_size != write_size || at_tmp_addr) {
- /* Check if the direct block is NOT currently allocated
- * in temp. file space
+ /* Check if the direct block is NOT currently allocated
+ * in temp. file space
*
- * (temp. file space does not need to be freed)
+ * (temp. file space does not need to be freed)
*/
if(!at_tmp_addr)
/* Release direct block's current disk space */
@@ -2255,8 +2255,8 @@ H5HF__cache_dblock_pre_serialize(H5F_t *f, void *_thing,
if(HADDR_UNDEF == (dblock_addr = H5MF_alloc((H5F_t *)f, H5FD_MEM_FHEAP_DBLOCK, (hsize_t)write_size)))
HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "file allocation failed for fractal heap direct block")
- /* Update information about compressed direct block's
- * location & size
+ /* Update information about compressed direct block's
+ * location & size
*/
HDassert(hdr->man_dtable.table_addr == addr);
HDassert(hdr->pline_root_direct_size == len);
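The resize test just above reduces to a small predicate (a sketch with
hypothetical names): a filtered block must be reallocated when its
compressed size has changed, or when it still sits at a temporary address,
since temporary file space is never freed.

    #include <stddef.h>

    static int
    must_reallocate(size_t recorded_size, size_t write_size, int at_tmp_addr)
    {
        return recorded_size != write_size || at_tmp_addr;
    }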
@@ -2285,7 +2285,7 @@ H5HF__cache_dblock_pre_serialize(H5F_t *f, void *_thing,
par_changed = TRUE;
} /* end if */
- /* verify that the cache's last record of the compressed
+ /* verify that the cache's last record of the compressed
* size matches the heap's last record. This value will
* likely change shortly.
*/
@@ -2293,10 +2293,10 @@ H5HF__cache_dblock_pre_serialize(H5F_t *f, void *_thing,
/* Check if we need to re-size the block on disk */
if(par_iblock->filt_ents[par_entry].size != write_size || at_tmp_addr) {
- /* Check if the direct block is NOT currently allocated
- * in temp. file space
+ /* Check if the direct block is NOT currently allocated
+ * in temp. file space
*
- * (temp. file space does not need to be freed)
+ * (temp. file space does not need to be freed)
*/
if(!at_tmp_addr)
/* Release direct block's current disk space */
@@ -2307,8 +2307,8 @@ H5HF__cache_dblock_pre_serialize(H5F_t *f, void *_thing,
if(HADDR_UNDEF == (dblock_addr = H5MF_alloc((H5F_t *)f, H5FD_MEM_FHEAP_DBLOCK, (hsize_t)write_size)))
HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "file allocation failed for fractal heap direct block")
- /* Update information about compressed direct block's
- * location & size
+ /* Update information about compressed direct block's
+ * location & size
*/
HDassert(par_iblock->ents[par_entry].addr == addr);
HDassert(par_iblock->filt_ents[par_entry].size == len);
@@ -2326,21 +2326,21 @@ H5HF__cache_dblock_pre_serialize(H5F_t *f, void *_thing,
} /* end else */
} /* end if */
else {
- /* I/O filters are not enabled -- thus all we need to do is check to
- * see if the direct block is in temporary (AKA imaginary) file
+ /* I/O filters are not enabled -- thus all we need to do is check to
+ * see if the direct block is in temporary (AKA imaginary) file
* space, and move it to real file space if it is.
*
- * As in the I/O filters case above, we will have to touch up the
+ * As in the I/O filters case above, we will have to touch up the
 * direct block's parent if the direct block is relocated.
*
- * Recall that temporary file space need not be freed, which
+ * Recall that temporary file space need not be freed, which
* simplifies matters slightly.
*/
write_buf = dblock->blk;
write_size = dblock->size;
- /* Check to see if we must re-allocate direct block from 'temp.'
- * to 'normal' file space
+ /* Check to see if we must re-allocate direct block from 'temp.'
+ * to 'normal' file space
*/
if(at_tmp_addr) {
/* Allocate 'normal' space for the direct block */
@@ -2377,9 +2377,9 @@ H5HF__cache_dblock_pre_serialize(H5F_t *f, void *_thing,
} /* end if */
} /* end else */
- /* At this point, write_buf points to a buffer containing the image
+ /* At this point, write_buf points to a buffer containing the image
* of the direct block that is ready to copy into the image buffer,
- * and write_size contains the length of this buffer.
+ * and write_size contains the length of this buffer.
*
* Also, if image size or address has changed, the direct block's
* parent has been modified to reflect the change.
@@ -2419,14 +2419,14 @@ done:
/*-------------------------------------------------------------------------
* Function: H5HF__cache_dblock_serialize
*
- * Purpose: In principle, this function is supposed to construct the on
- * disk image of the direct block, and place that image in the
+ * Purpose: In principle, this function is supposed to construct the on
+ * disk image of the direct block, and place that image in the
* image buffer provided by the metadata cache.
*
- * However, since there are cases in which the pre_serialize
- * function has to construct the on disk image to determine its size
+ * However, since there are cases in which the pre_serialize
+ * function has to construct the on disk image to determine its size
* and address, this function simply copies the image prepared by
- * the pre-serialize function into the supplied image buffer, and
+ * the pre-serialize function into the supplied image buffer, and
* discards a buffer if necessary.
*
* Return: Success: SUCCEED
@@ -2437,7 +2437,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5HF__cache_dblock_serialize(const H5F_t *f, void *image, size_t len,
void *_thing)
{
@@ -2462,8 +2462,8 @@ H5HF__cache_dblock_serialize(const H5F_t *f, void *image, size_t len,
/* Copy the image from *(dblock->write_buf) to *image */
H5MM_memcpy(image, dblock->write_buf, dblock->write_size);
- /* Free *(dblock->write_buf) if it was allocated by the
- * pre-serialize function
+ /* Free *(dblock->write_buf) if it was allocated by the
+ * pre-serialize function
*/
if(dblock->write_buf != dblock->blk)
H5MM_xfree(dblock->write_buf);
@@ -2490,7 +2490,7 @@ H5HF__cache_dblock_serialize(const H5F_t *f, void *image, size_t len,
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5HF__cache_dblock_notify(H5AC_notify_action_t action, void *_thing)
{
H5HF_direct_t *dblock = (H5HF_direct_t *)_thing; /* Fractal heap direct block */
@@ -2560,7 +2560,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5HF__cache_dblock_free_icr(void *_thing)
{
H5HF_direct_t *dblock = (H5HF_direct_t *)_thing; /* Fractal heap direct block */
@@ -2597,7 +2597,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5HF__cache_dblock_fsf_size(const void *_thing, hsize_t *fsf_size)
{
const H5HF_direct_t *dblock = (const H5HF_direct_t *)_thing; /* Fractal heap direct block */
@@ -2621,9 +2621,9 @@ H5HF__cache_dblock_fsf_size(const void *_thing, hsize_t *fsf_size)
/*------------------------------------------------------------------------
* Function: H5HF__cache_verify_hdr_descendants_clean
*
- * Purpose: Sanity checking routine that verifies that all indirect
- * and direct blocks that are descendants of the supplied
- * instance of H5HF_hdr_t are clean. Set *clean to
+ * Purpose: Sanity checking routine that verifies that all indirect
+ * and direct blocks that are descendants of the supplied
+ * instance of H5HF_hdr_t are clean. Set *clean to
* TRUE if this is the case, and to FALSE otherwise.
*
* Update -- 8/24/15
@@ -2636,41 +2636,41 @@ H5HF__cache_dblock_fsf_size(const void *_thing, hsize_t *fsf_size)
* remain dirty.
*
* To address this, updated the sanity checks in this function
- * to treat entries whose images are up to date as clean if
+ * to treat entries whose images are up to date as clean if
* a cache serialization is in progress.
*
* Update -- 9/29/16
*
* The implementation of flush dependencies has been changed.
- * Prior to this change, a flush dependency parent could be
+ * Prior to this change, a flush dependency parent could be
* flushed if and only if all its flush dependency descendants
- * were clean. In the new definition, a flush dependency
+ * were clean. In the new definition, a flush dependency
* parent can be flushed if all its immediate flush dependency
- * children are clean, regardless of any other dirty
- * descendants.
+ * children are clean, regardless of any other dirty
+ * descendants.
*
- * Further, metadata cache entries are now allowed to have
- * multiple flush dependency parents.
+ * Further, metadata cache entries are now allowed to have
+ * multiple flush dependency parents.
*
- * This means that the fractal heap is no longer necessarily
+ * This means that the fractal heap is no longer necessarily
* flushed from the bottom up.
*
- * For example, it is now possible for a dirty fractal heap
+ * For example, it is now possible for a dirty fractal heap
 * header to be flushed before a dirty dblock, as long as
- * there is an intervening iblock, and the header has no
+ * there is an intervening iblock, and the header has no
* dirty immediate flush dependency children.
*
- * Also, I gather that under some circumstances, a dblock
- * will be a direct flush dependency child both of the iblock
+ * Also, I gather that under some circumstances, a dblock
+ * will be a direct flush dependency child both of the iblock
* that points to it, and of the fractal heap header.
*
* As a result of these changes, the functionality of these
* sanity checking routines has been modified significantly.
* Instead of scanning the fractal heap from a starting point
- * down, and verifying that there were no dirty entries, the
- * functions now scan downward from the starting point and
- * verify that there are no dirty flush dependency children
- * of the specified flush dependency parent. In passing,
+ * down, and verifying that there were no dirty entries, the
+ * functions now scan downward from the starting point and
+ * verify that there are no dirty flush dependency children
+ * of the specified flush dependency parent. In passing,
* they also walk the data structure, and verify it.
*
*
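As a rough model of the new rule (a sketch only, with invented names): a
flush dependency parent becomes flushable once every immediate child is
clean, no matter what lies further down.

    static int
    parent_flushable(const int *child_is_dirty, int n_immediate_children)
    {
        int i;

        for(i = 0; i < n_immediate_children; i++)
            if(child_is_dirty[i])
                return 0;   /* a dirty immediate child blocks the flush */
        return 1;           /* deeper dirty descendants do not          */
    }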
@@ -2709,16 +2709,16 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
/* We have three basic scenarios we have to deal with:
*
- * The first, and most common case, is that there is a root iblock.
- * In this case we need to verify that the root iblock and all its
+ * The first, and most common case, is that there is a root iblock.
+ * In this case we need to verify that the root iblock and all its
* children are clean.
*
- * The second, and much less common case, is that in which
- * the fractal heap contains only one direct block, which is
- * pointed to by hdr->man_dtable.table_addr. In this case, all we
+ * The second, and much less common case, is that in which
+ * the fractal heap contains only one direct block, which is
+ * pointed to by hdr->man_dtable.table_addr. In this case, all we
* need to do is verify that the root direct block is clean.
*
- * Finally, it is possible that the fractal heap is empty, and
+ * Finally, it is possible that the fractal heap is empty, and
* has neither a root indirect block nor a root direct block.
* In this case, we have nothing to do.
*/
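The three scenarios amount to the following dispatch (an illustrative
sketch with invented field names, not the actual routine):

    #include <stddef.h>

    typedef struct {
        void    *root_iblock;        /* in-core root indirect block, if any */
        unsigned curr_root_rows;     /* > 0 iff the root block is indirect  */
        int      table_addr_defined; /* root block address is valid         */
    } hdr_sketch;

    static int
    classify_heap(const hdr_sketch *hdr)
    {
        if(hdr->root_iblock != NULL || hdr->curr_root_rows > 0)
            return 1;   /* scenario 1: verify root iblock and children */
        else if(hdr->table_addr_defined)
            return 2;   /* scenario 2: verify the single root dblock   */
        else
            return 3;   /* scenario 3: empty heap -- nothing to do     */
    }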
@@ -2726,15 +2726,15 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
/* There are two ways in which we can arrive at the first scenario.
*
* By far the most common is when hdr->root_iblock contains a pointer
- * to the root iblock -- in this case the root iblock is almost certainly
+ * to the root iblock -- in this case the root iblock is almost certainly
* pinned, although we can't count on that.
*
- * However, it is also possible that there is a root iblock that
- * is no longer pointed to by the header. In this case, the on
+ * However, it is also possible that there is a root iblock that
+ * is no longer pointed to by the header. In this case, the on
* disk address of the iblock will be in hdr->man_dtable.table_addr
* and hdr->man_dtable.curr_root_rows will contain a positive value.
*
- * Since the former case is far and away the most common, we don't
+ * Since the former case is far and away the most common, we don't
* worry too much about efficiency in the second case.
*/
if(hdr->root_iblock ||
@@ -2748,7 +2748,7 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
/* make note of the on disk address of the root iblock */
if(root_iblock == NULL)
/* hdr->man_dtable.table_addr must contain address of root
- * iblock. Check to see if it is in cache. If it is,
+ * iblock. Check to see if it is in cache. If it is,
* protect it and put its address in root_iblock.
*/
root_iblock_addr = hdr->man_dtable.table_addr;
@@ -2786,18 +2786,18 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
/* At this point, the root iblock may be pinned, protected,
* both, or neither, and we may or may not have a pointer
- * to root iblock in memory.
+ * to root iblock in memory.
*
* Before we call H5HF__cache_verify_iblock_descendants_clean(),
- * we must ensure that the root iblock is either pinned or
- * protected or both, and that we have a pointer to it.
+ * we must ensure that the root iblock is either pinned or
+ * protected or both, and that we have a pointer to it.
* Do this as follows:
*/
if(root_iblock == NULL) { /* we don't have ptr to root iblock */
if(0 == (root_iblock_status & H5AC_ES__IS_PROTECTED)) {
/* just protect the root iblock -- this will give us
- * the pointer we need to proceed, and ensure that
- * it is locked into the metadata cache for the
+ * the pointer we need to proceed, and ensure that
+ * it is locked into the metadata cache for the
* duration.
*
* Note that the udata is only used in the load callback.
@@ -2808,9 +2808,9 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
* The tag specified in the API context we received
* as a parameter (via API context) may not be correct.
* Grab the (hopefully) correct tag from the header,
- * and load it into the API context via the H5_BEGIN_TAG and
+ * and load it into the API context via the H5_BEGIN_TAG and
 * H5_END_TAG macros. Note that any error bracketed by
- * these macros must be reported with HGOTO_ERROR_TAG.
+ * these macros must be reported with HGOTO_ERROR_TAG.
*/
H5_BEGIN_TAG(hdr->heap_addr)
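The bracketing requirement can be modeled in isolation (a self-contained
sketch with hypothetical names): any exit between the begin and end of the
bracket must restore the previous tag, which is why errors raised inside
it must go through the _TAG variants.

    static unsigned long current_tag;   /* stand-in for the context tag */

    static int
    with_tag(unsigned long tag, int (*op)(void *), void *arg)
    {
        unsigned long prev = current_tag;
        int ret;

        current_tag = tag;    /* H5_BEGIN_TAG */
        ret = op(arg);        /* a failure here must not skip the restore */
        current_tag = prev;   /* H5_END_TAG   */
        return ret;
    }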
@@ -2825,7 +2825,7 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
/* the root iblock is protected, and we have no
* legitimate way of getting a pointer to it.
*
- * We square this circle by using the
+ * We square this circle by using the
* H5AC_get_entry_ptr_from_addr() to get the needed
* pointer.
*
@@ -2846,14 +2846,14 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
* be unpinned is if none of its children are in cache.
* This unfortunately means that if it is protected and
* not pinned, the fractal heap is in the process of loading
- * or inserting one of its children. The obvious
- * implication is that there is a significant chance that
+ * or inserting one of its children. The obvious
+ * implication is that there is a significant chance that
* the root iblock is in an unstable state.
*
- * All this suggests that using
- * H5AC_get_entry_ptr_from_addr() to obtain the pointer
- * to the protected root iblock is questionable here.
- * However, since this is test/debugging code, I expect
+ * All this suggests that using
+ * H5AC_get_entry_ptr_from_addr() to obtain the pointer
+ * to the protected root iblock is questionable here.
+ * However, since this is test/debugging code, I expect
* that we will use this approach until it causes problems,
* or we think of a better way.
*/
@@ -2863,8 +2863,8 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
} /* end else */
} /* end if */
else { /* root_iblock != NULL */
- /* we have the pointer to the root iblock. Protect it
- * if it is neither pinned nor protected -- otherwise we
+ /* we have the pointer to the root iblock. Protect it
+ * if it is neither pinned nor protected -- otherwise we
* are ready to go.
*/
H5HF_indirect_t * iblock = NULL;
@@ -2882,9 +2882,9 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
 * The tag specified in the API context we received
* as a parameter (via API context) may not be correct.
* Grab the (hopefully) correct tag from the header,
- * and load it into the API context via the H5_BEGIN_TAG and
+ * and load it into the API context via the H5_BEGIN_TAG and
 * H5_END_TAG macros. Note that any error bracketed by
- * these macros must be reported with HGOTO_ERROR_TAG.
+ * these macros must be reported with HGOTO_ERROR_TAG.
*/
H5_BEGIN_TAG(hdr->heap_addr)
@@ -2953,8 +2953,8 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "root dblock in cache and is a flush dep parent.")
*clean = !((root_dblock_status & H5AC_ES__IS_DIRTY) &&
- (((root_dblock_status &
- H5AC_ES__IMAGE_IS_UP_TO_DATE) == 0) ||
+ (((root_dblock_status &
+ H5AC_ES__IMAGE_IS_UP_TO_DATE) == 0) ||
(!H5AC_get_serialization_in_progress(f))));
*fd_clean = *clean;
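Spelled out, the predicate above treats an entry as clean when it is not
dirty, or when its on-disk image is up to date while a cache serialization
is in progress (a sketch):

    static int
    counts_as_clean(int is_dirty, int image_up_to_date, int serializing)
    {
        return !(is_dirty && (!image_up_to_date || !serializing));
    }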
@@ -2965,8 +2965,8 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
} /* end else */
} /* end else-if */
else {
- /* this is scenario 3 -- the fractal heap is empty, and we
- * have nothing to do.
+ /* this is scenario 3 -- the fractal heap is empty, and we
+ * have nothing to do.
*/
*fd_clean = TRUE;
*clean = TRUE;
@@ -2981,62 +2981,62 @@ done:
/*------------------------------------------------------------------------
* Function: H5HF__cache_verify_iblock_descendants_clean
*
- * Purpose: Sanity checking routine that verifies that all indirect
- * and direct blocks that are descendants of the supplied
- * instance of H5HF_indirect_t are clean. Set *clean
+ * Purpose: Sanity checking routine that verifies that all indirect
+ * and direct blocks that are descendants of the supplied
+ * instance of H5HF_indirect_t are clean. Set *clean
* to TRUE if this is the case, and to FALSE otherwise.
*
- * In passing, the function also does a cursory check to
- * spot any obvious errors in the flush dependency setup.
- * If any problems are found, the function returns failure.
- * Note that these checks are not exhaustive, thus passing
- * them does not mean that the flush dependencies are
+ * In passing, the function also does a cursory check to
+ * spot any obvious errors in the flush dependency setup.
+ * If any problems are found, the function returns failure.
+ * Note that these checks are not exhaustive, thus passing
+ * them does not mean that the flush dependencies are
* correct -- only that there is nothing obviously wrong
* with them.
*
- * WARNING: At its top level call, this function is
- * intended to be called from H5HF_cache_iblock_flush(),
- * and thus presumes that the supplied indirect block
- * is in cache. Any other use of this function and
- * its descendants must ensure that this assumption is
+ * WARNING: At its top level call, this function is
+ * intended to be called from H5HF_cache_iblock_flush(),
+ * and thus presumes that the supplied indirect block
+ * is in cache. Any other use of this function and
+ * its descendants must ensure that this assumption is
* met.
*
- * Note that this function and
- * H5HF__cache_verify_descendant_iblocks_clean() are
+ * Note that this function and
+ * H5HF__cache_verify_descendant_iblocks_clean() are
* recursive co-routines.
*
* Update -- 9/29/16
*
* The implementation of flush dependencies has been changed.
- * Prior to this change, a flush dependency parent could be
+ * Prior to this change, a flush dependency parent could be
* flushed if and only if all its flush dependency descendants
- * were clean. In the new definition, a flush dependency
+ * were clean. In the new definition, a flush dependency
* parent can be flushed if all its immediate flush dependency
- * children are clean, regardless of any other dirty
- * descendants.
+ * children are clean, regardless of any other dirty
+ * descendants.
*
- * Further, metadata cache entries are now allowed to have
- * multiple flush dependency parents.
+ * Further, metadata cache entries are now allowed to have
+ * multiple flush dependency parents.
*
- * This means that the fractal heap is no longer necessarily
+ * This means that the fractal heap is no longer necessarily
* flushed from the bottom up.
*
- * For example, it is now possible for a dirty fractal heap
+ * For example, it is now possible for a dirty fractal heap
 * header to be flushed before a dirty dblock, as long as
- * there is an intervening iblock, and the header has no
+ * there is an intervening iblock, and the header has no
* dirty immediate flush dependency children.
*
- * Also, I gather that under some circumstances, a dblock
- * will be a direct flush dependency child both of the iblock
+ * Also, I gather that under some circumstances, a dblock
+ * will be a direct flush dependency child both of the iblock
* that points to it, and of the fractal heap header.
*
* As a result of these changes, the functionality of these
* sanity checking routines has been modified significantly.
* Instead of scanning the fractal heap from a starting point
- * down, and verifying that there were no dirty entries, the
- * functions now scan downward from the starting point and
- * verify that there are no dirty flush dependency children
- * of the specified flush dependency parent. In passing,
+ * down, and verifying that there were no dirty entries, the
+ * functions now scan downward from the starting point and
+ * verify that there are no dirty flush dependency children
+ * of the specified flush dependency parent. In passing,
* they also walk the data structure, and verify it.
*
* Return: Non-negative on success/Negative on failure
@@ -3096,17 +3096,17 @@ done:
* direct blocks pointed to by the supplied indirect block
* are either clean, or not in the cache.
*
- * In passing, the function also does a cursory check to
- * spot any obvious errors in the flush dependency setup.
- * If any problems are found, the function returns failure.
- * Note that these checks are not exhaustive, thus passing
- * them does not mean that the flush dependencies are
+ * In passing, the function also does a cursory check to
+ * spot any obvious errors in the flush dependency setup.
+ * If any problems are found, the function returns failure.
+ * Note that these checks are not exhaustive, thus passing
+ * them does not mean that the flush dependencies are
* correct -- only that there is nothing obviously wrong
* with them.
*
- * WARNING: This function presumes that the supplied
- * iblock is in the cache, and will not be removed
- * during the call. Caller must ensure that this is
+ * WARNING: This function presumes that the supplied
+ * iblock is in the cache, and will not be removed
+ * during the call. Caller must ensure that this is
* the case before the call.
*
* Update -- 8/24/15
@@ -3125,35 +3125,35 @@ done:
* Update -- 9/29/16
*
* The implementation of flush dependencies has been changed.
- * Prior to this change, a flush dependency parent could be
+ * Prior to this change, a flush dependency parent could be
* flushed if and only if all its flush dependency descendants
- * were clean. In the new definition, a flush dependency
+ * were clean. In the new definition, a flush dependency
* parent can be flushed if all its immediate flush dependency
- * children are clean, regardless of any other dirty
- * descendants.
+ * children are clean, regardless of any other dirty
+ * descendants.
*
- * Further, metadata cache entries are now allowed to have
- * multiple flush dependency parents.
+ * Further, metadata cache entries are now allowed to have
+ * multiple flush dependency parents.
*
- * This means that the fractal heap is no longer necessarily
+ * This means that the fractal heap is no longer necessarily
* flushed from the bottom up.
*
- * For example, it is now possible for a dirty fractal heap
+ * For example, it is now possible for a dirty fractal heap
 * header to be flushed before a dirty dblock, as long as
- * there is an intervening iblock, and the header has no
+ * there is an intervening iblock, and the header has no
* dirty immediate flush dependency children.
*
- * Also, I gather that under some circumstances, a dblock
- * will be a direct flush dependency child both of the iblock
+ * Also, I gather that under some circumstances, a dblock
+ * will be a direct flush dependency child both of the iblock
* that points to it, and of the fractal heap header.
*
* As a result of these changes, the functionality of these
* sanity checking routines has been modified significantly.
* Instead of scanning the fractal heap from a starting point
- * down, and verifying that there were no dirty entries, the
- * functions now scan downward from the starting point and
- * verify that there are no dirty flush dependency children
- * of the specified flush dependency parent. In passing,
+ * down, and verifying that there were no dirty entries, the
+ * functions now scan downward from the starting point and
+ * verify that there are no dirty flush dependency children
+ * of the specified flush dependency parent. In passing,
* they also walk the data structure, and verify it.
*
* Return: Non-negative on success/Negative on failure
@@ -3165,8 +3165,8 @@ done:
*/
#ifndef NDEBUG
static herr_t
-H5HF__cache_verify_iblocks_dblocks_clean(H5F_t *f, haddr_t fd_parent_addr,
- H5HF_indirect_t *iblock, hbool_t *fd_clean, hbool_t *clean,
+H5HF__cache_verify_iblocks_dblocks_clean(H5F_t *f, haddr_t fd_parent_addr,
+ H5HF_indirect_t *iblock, hbool_t *fd_clean, hbool_t *clean,
hbool_t *has_dblocks)
{
unsigned num_direct_rows;
@@ -3224,16 +3224,16 @@ H5HF__cache_verify_iblocks_dblocks_clean(H5F_t *f, haddr_t fd_parent_addr,
(((dblock_status & H5AC_ES__IMAGE_IS_UP_TO_DATE) == 0) ||
(!H5AC_get_serialization_in_progress(f)))) {
*clean = FALSE;
-
+
if(H5AC_flush_dependency_exists(f, fd_parent_addr, dblock_addr, &fd_exists) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't check flush dependency")
- if(fd_exists)
+ if(fd_exists)
*fd_clean = FALSE;
} /* end if */
- /* If a child dblock is in cache, it must have a flush
- * dependency relationship with this iblock. Test this
+ /* If a child dblock is in cache, it must have a flush
+ * dependency relationship with this iblock. Test this
* here.
*/
if(H5AC_flush_dependency_exists(f, iblock_addr, dblock_addr, &fd_exists) < 0)
@@ -3246,7 +3246,7 @@ H5HF__cache_verify_iblocks_dblocks_clean(H5F_t *f, haddr_t fd_parent_addr,
i++;
} /* end while */
-
+
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5HF__cache_verify_iblocks_dblocks_clean() */
@@ -3260,17 +3260,17 @@ done:
* direct blocks pointed to by the supplied indirect block
* are either clean, or not in the cache.
*
- * In passing, the function also does a cursory check to
- * spot any obvious errors in the flush dependency setup.
- * If any problems are found, the function returns failure.
- * Note that these checks are not exhaustive, thus passing
- * them does not mean that the flush dependencies are
+ * In passing, the function also does a cursory check to
+ * spot any obvious errors in the flush dependency setup.
+ * If any problems are found, the function returns failure.
+ * Note that these checks are not exhaustive, thus passing
+ * them does not mean that the flush dependencies are
* correct -- only that there is nothing obviously wrong
* with them.
*
- * WARNING: This function presumes that the supplied
- * iblock is in the cache, and will not be removed
- * during the call. Caller must ensure that this is
+ * WARNING: This function presumes that the supplied
+ * iblock is in the cache, and will not be removed
+ * during the call. Caller must ensure that this is
* the case before the call.
*
* Update -- 8/24/15
@@ -3289,35 +3289,35 @@ done:
* Update -- 9/29/16
*
* The implementation of flush dependencies has been changed.
- * Prior to this change, a flush dependency parent could be
+ * Prior to this change, a flush dependency parent could be
* flushed if and only if all its flush dependency descendants
- * were clean. In the new definition, a flush dependency
+ * were clean. In the new definition, a flush dependency
* parent can be flushed if all its immediate flush dependency
- * children are clean, regardless of any other dirty
- * descendants.
+ * children are clean, regardless of any other dirty
+ * descendants.
*
- * Further, metadata cache entries are now allowed to have
+ * Further, metadata cache entries are now allowed to have
* multiple flush dependency parents.
*
- * This means that the fractal heap is no longer necessarily
+ * This means that the fractal heap is no longer necessarily
* flushed from the bottom up.
*
- * For example, it is now possible for a dirty fractal heap
+ * For example, it is now possible for a dirty fractal heap
 * header to be flushed before a dirty dblock, as long as
- * there is an intervening iblock, and the header has no
+ * there is an intervening iblock, and the header has no
* dirty immediate flush dependency children.
*
- * Also, I gather that under some circumstances, a dblock
- * will be a direct flush dependency child both of the iblock
+ * Also, I gather that under some circumstances, a dblock
+ * will be a direct flush dependency child both of the iblock
* that points to it, and of the fractal heap header.
*
* As a result of these changes, the functionality of these
* sanity checking routines has been modified significantly.
* Instead of scanning the fractal heap from a starting point
- * down, and verifying that there were no dirty entries, the
- * functions now scan downward from the starting point and
- * verify that there are no dirty flush dependency children
- * of the specified flush dependency parent. In passing,
+ * down, and verifying that there were no dirty entries, the
+ * functions now scan downward from the starting point and
+ * verify that there are no dirty flush dependency children
+ * of the specified flush dependency parent. In passing,
* they also walk the data structure, and verify it.
*
*
@@ -3388,54 +3388,54 @@ H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, haddr_t fd_parent_addr,
*fd_clean = FALSE;
} /* end if */
- /* if the child iblock is in cache and *fd_clean is TRUE,
+ /* if the child iblock is in cache and *fd_clean is TRUE,
* we must continue to explore down the fractal heap tree
- * structure to verify that all descendant blocks that are
- * flush dependency children of the entry at parent_addr are
- * either clean, or not in the metadata cache. We do this
- * with a recursive call to
+ * structure to verify that all descendant blocks that are
+ * flush dependency children of the entry at parent_addr are
+ * either clean, or not in the metadata cache. We do this
+ * with a recursive call to
* H5HF__cache_verify_iblock_descendants_clean().
* However, we can't make this call unless the child iblock
- * is somehow locked into the cache -- typically via either
+ * is somehow locked into the cache -- typically via either
* pinning or protecting.
*
* If the child iblock is pinned, we can look up its pointer
- * on the current iblock's pinned child iblock list, and
+ * on the current iblock's pinned child iblock list, and
 * use that pointer in the recursive call.
*
* If the entry is unprotected and unpinned, we simply
* protect it.
*
- * If, however, the child iblock is already protected,
- * but not pinned, we have a bit of a problem, as we have
+ * If, however, the child iblock is already protected,
+ * but not pinned, we have a bit of a problem, as we have
* no legitimate way of looking up its pointer in memory.
*
* To solve this problem, I have added a new metadata cache
- * call to obtain the pointer.
+ * call to obtain the pointer.
*
- * WARNING: This call should be used only in debugging
- * routines, and it should be avoided there when
- * possible.
+ * WARNING: This call should be used only in debugging
+ * routines, and it should be avoided there when
+ * possible.
*
- * Further, if we ever multi-thread the cache,
- * this routine will have to be either discarded
+ * Further, if we ever multi-thread the cache,
+ * this routine will have to be either discarded
* or heavily re-worked.
*
- * Finally, keep in mind that the entry whose
- * pointer is obtained in this fashion may not
- * be in a stable state.
+ * Finally, keep in mind that the entry whose
+ * pointer is obtained in this fashion may not
+ * be in a stable state.
*
- * Assuming that the flush dependency code is working
- * as it should, the only reason for the child entry to
+ * Assuming that the flush dependency code is working
+ * as it should, the only reason for the child entry to
* be unpinned is if none of its children are in cache.
- * This unfortunately means that if it is protected and
+ * This unfortunately means that if it is protected and
* not pinned, the fractal heap is in the process of loading
* or inserting one of its children. The obvious implication
- * is that there is a significant chance that the child
+ * is that there is a significant chance that the child
* iblock is in an unstable state.
*
- * All this suggests that using the new call to obtain the
- * pointer to the protected child iblock is questionable
+ * All this suggests that using the new call to obtain the
+ * pointer to the protected child iblock is questionable
* here. However, since this is test/debugging code, I
* expect that we will use this approach until it causes
* problems, or we think of a better way.
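The pointer-acquisition choices discussed above reduce to a three-way
decision; the following sketch (all names invented) summarizes it:

    typedef enum {
        CHILD_PINNED,          /* on the parent's pinned child list */
        CHILD_UNPROTECTED,     /* neither pinned nor protected      */
        CHILD_PROTECTED_ONLY   /* protected but not pinned          */
    } child_state;

    static const char *
    child_ptr_strategy(child_state s)
    {
        switch(s) {
            case CHILD_PINNED:
                return "look the pointer up on the pinned child list";
            case CHILD_UNPROTECTED:
                return "protect the entry for the duration of the scan";
            case CHILD_PROTECTED_ONLY:
                return "debug-only cache lookup; entry may be unstable";
            default:
                return "invalid state";
        }
    }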
@@ -3491,8 +3491,8 @@ H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, haddr_t fd_parent_addr,
child_iblock = iblock->child_iblocks[i - first_iblock_index];
} /* end else */
- /* At this point, one way or another we should have
- * a pointer to the child iblock. Verify that we
+ /* At this point, one way or another we should have
+ * a pointer to the child iblock. Verify that we
 * have the correct one.
*/
HDassert(child_iblock);
@@ -3504,8 +3504,8 @@ H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, haddr_t fd_parent_addr,
if(H5HF__cache_verify_iblock_descendants_clean(f, fd_parent_addr, child_iblock, &child_iblock_status, fd_clean, clean) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify child iblock clean.")
- /* if iblock_addr != fd_parent_addr, verify that a flush
- * dependency relationship exists between iblock and
+ /* if iblock_addr != fd_parent_addr, verify that a flush
+ * dependency relationship exists between iblock and
* the child iblock.
*/
if(fd_parent_addr != iblock_addr) {
diff --git a/src/H5HFiblock.c b/src/H5HFiblock.c
index 20a62b7..07eb9cd 100644
--- a/src/H5HFiblock.c
+++ b/src/H5HFiblock.c
@@ -1805,7 +1805,7 @@ H5HF__man_iblock_parent_info(const H5HF_hdr_t *hdr, hsize_t block_off,
/* Sanity check - first lookup must be an indirect block */
HDassert(row >= hdr->man_dtable.max_direct_rows);
- /* Traverse down until a direct block at the offset is found, then
+ /* Traverse down until a direct block at the offset is found, then
* use previous (i.e. parent's) offset, row, and column.
*/
prev_par_block_off = par_block_off = 0;
diff --git a/src/H5HFman.c b/src/H5HFman.c
index e5b5cb8..ea00546 100644
--- a/src/H5HFman.c
+++ b/src/H5HFman.c
@@ -238,7 +238,7 @@ H5HF_man_get_obj_len(H5HF_hdr_t *hdr, const uint8_t *id, size_t *obj_len_p)
HDassert(hdr);
HDassert(id);
HDassert(obj_len_p);
-
+
/* Skip over the flag byte */
id++;
@@ -276,7 +276,7 @@ H5HF__man_get_obj_off(const H5HF_hdr_t *hdr, const uint8_t *id, hsize_t *obj_off
HDassert(hdr);
HDassert(id);
HDassert(obj_off_p);
-
+
/* Skip over the flag byte */
id++;
@@ -307,8 +307,8 @@ H5HF__man_op_real(H5HF_hdr_t *hdr, const uint8_t *id,
{
H5HF_direct_t *dblock = NULL; /* Pointer to direct block to query */
unsigned dblock_access_flags; /* Access method for direct block */
- /* must equal either
- * H5AC__NO_FLAGS_SET or
+ /* must equal either
+ * H5AC__NO_FLAGS_SET or
* H5AC__READ_ONLY_FLAG
*/
haddr_t dblock_addr; /* Direct block address */
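The constraint noted above on dblock_access_flags can be sketched as a
simple selection; the flag values here are invented for illustration:

    enum { NO_FLAGS_SET = 0x0u, READ_ONLY_FLAG = 0x100u };  /* illustrative */

    static unsigned
    pick_access_flags(int op_will_modify)
    {
        /* only these two values are legal for dblock_access_flags */
        return op_will_modify ? NO_FLAGS_SET : READ_ONLY_FLAG;
    }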
diff --git a/src/H5HFpkg.h b/src/H5HFpkg.h
index b38a897..83e46fc 100644
--- a/src/H5HFpkg.h
+++ b/src/H5HFpkg.h
@@ -646,9 +646,9 @@ H5_DLL herr_t H5HF_hdr_dest(H5HF_hdr_t *hdr);
H5_DLL herr_t H5HF_iblock_incr(H5HF_indirect_t *iblock);
H5_DLL herr_t H5HF__iblock_decr(H5HF_indirect_t *iblock);
H5_DLL herr_t H5HF_iblock_dirty(H5HF_indirect_t *iblock);
-H5_DLL herr_t H5HF__man_iblock_root_create(H5HF_hdr_t *hdr,
+H5_DLL herr_t H5HF__man_iblock_root_create(H5HF_hdr_t *hdr,
size_t min_dblock_size);
-H5_DLL herr_t H5HF__man_iblock_root_double(H5HF_hdr_t *hdr,
+H5_DLL herr_t H5HF__man_iblock_root_double(H5HF_hdr_t *hdr,
size_t min_dblock_size);
H5_DLL herr_t H5HF__man_iblock_alloc_row(H5HF_hdr_t *hdr,
H5HF_free_section_t **sec_node);
@@ -779,13 +779,13 @@ H5_DLL herr_t H5HF__space_add(H5HF_hdr_t *hdr, H5HF_free_section_t *node,
H5_DLL htri_t H5HF__space_find(H5HF_hdr_t *hdr, hsize_t request,
H5HF_free_section_t **node);
H5_DLL herr_t H5HF__space_revert_root(const H5HF_hdr_t *hdr);
-H5_DLL herr_t H5HF__space_create_root(const H5HF_hdr_t *hdr,
+H5_DLL herr_t H5HF__space_create_root(const H5HF_hdr_t *hdr,
H5HF_indirect_t *root_iblock);
H5_DLL herr_t H5HF__space_size(H5HF_hdr_t *hdr, hsize_t *fs_size);
H5_DLL herr_t H5HF__space_remove(H5HF_hdr_t *hdr, H5HF_free_section_t *node);
H5_DLL herr_t H5HF__space_close(H5HF_hdr_t *hdr);
H5_DLL herr_t H5HF__space_delete(H5HF_hdr_t *hdr);
-H5_DLL herr_t H5HF__space_sect_change_class(H5HF_hdr_t *hdr,
+H5_DLL herr_t H5HF__space_sect_change_class(H5HF_hdr_t *hdr,
H5HF_free_section_t *sect, uint16_t new_class);
/* Free space section routines */
diff --git a/src/H5HFsection.c b/src/H5HFsection.c
index f5ac8e5..5aeac50 100644
--- a/src/H5HFsection.c
+++ b/src/H5HFsection.c
@@ -156,7 +156,7 @@ static hbool_t H5HF_sect_indirect_is_first(H5HF_free_section_t *sect);
static H5HF_indirect_t * H5HF_sect_indirect_get_iblock(H5HF_free_section_t *sect);
static hsize_t H5HF_sect_indirect_iblock_off(const H5HF_free_section_t *sect);
static H5HF_free_section_t * H5HF_sect_indirect_top(H5HF_free_section_t *sect);
-static herr_t H5HF__sect_indirect_merge_row(H5HF_hdr_t *hdr,
+static herr_t H5HF__sect_indirect_merge_row(H5HF_hdr_t *hdr,
H5HF_free_section_t *sect1, H5HF_free_section_t *sect2);
static herr_t H5HF__sect_indirect_build_parent(H5HF_hdr_t *hdr, H5HF_free_section_t *sect);
static herr_t H5HF__sect_indirect_shrink(H5HF_hdr_t *hdr,
diff --git a/src/H5HFtiny.c b/src/H5HFtiny.c
index 1407861..f4b0ae1 100644
--- a/src/H5HFtiny.c
+++ b/src/H5HFtiny.c
@@ -263,11 +263,11 @@ H5HF_tiny_op_real(H5HF_hdr_t *hdr, const uint8_t *id, H5HF_operator_t op,
HDassert(hdr);
HDassert(id);
HDassert(op);
-
+
/* Get the object's encoded length */
 /* H5HF_tiny_get_obj_len can't fail */
ret_value = H5HF_tiny_get_obj_len(hdr, id, &enc_obj_size);
-
+
/* Advance past flag byte(s) */
if(!hdr->tiny_len_extended)
id++;
diff --git a/src/H5HGcache.c b/src/H5HGcache.c
index 44c7a82..df4dee3 100644
--- a/src/H5HGcache.c
+++ b/src/H5HGcache.c
@@ -64,10 +64,10 @@ static herr_t H5HG__cache_heap_get_initial_load_size(void *udata, size_t *image_
static herr_t H5HG__cache_heap_get_final_load_size(const void *_image,
size_t image_len, void *udata, size_t *actual_len);
static void *H5HG__cache_heap_deserialize(const void *image, size_t len,
- void *udata, hbool_t *dirty);
+ void *udata, hbool_t *dirty);
static herr_t H5HG__cache_heap_image_len(const void *thing, size_t *image_len);
static herr_t H5HG__cache_heap_serialize(const H5F_t *f, void *image,
- size_t len, void *thing);
+ size_t len, void *thing);
static herr_t H5HG__cache_heap_free_icr(void *thing);
/* Prefix deserialization */
@@ -158,10 +158,10 @@ done:
/*-------------------------------------------------------------------------
* Function: H5HG__cache_heap_get_initial_load_size()
*
- * Purpose: Return the initial speculative read size to the metadata
- * cache. This size will be used in the initial attempt to read
- * the global heap. If this read is too small, the cache will
- * try again with the correct value obtained from
+ * Purpose: Return the initial speculative read size to the metadata
+ * cache. This size will be used in the initial attempt to read
+ * the global heap. If this read is too small, the cache will
+ * try again with the correct value obtained from
 * H5HG__cache_heap_get_final_load_size().
*
* Return: Success: SUCCEED
@@ -232,7 +232,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5HG__cache_heap_deserialize
*
- * Purpose: Given a buffer containing the on disk image of the global
+ * Purpose: Given a buffer containing the on disk image of the global
* heap, deserialize it, load its contents into a newly allocated
* instance of H5HG_heap_t, and return a pointer to the new instance.
*
@@ -334,11 +334,11 @@ H5HG__cache_heap_deserialize(const void *_image, size_t len, void *_udata,
heap->obj[idx].begin = begin;
/*
- * The total storage size includes the size of the object
- * header and is zero padded so the next object header is
- * properly aligned. The entire obj array was calloc'ed,
- * so no need to zero the space here. The last bit of space
- * is the free space object whose size is never padded and
+ * The total storage size includes the size of the object
+ * header and is zero padded so the next object header is
+ * properly aligned. The entire obj array was calloc'ed,
+ * so no need to zero the space here. The last bit of space
+ * is the free space object whose size is never padded and
* already includes the object header.
*/
if(idx > 0) {
@@ -384,7 +384,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5HG__cache_heap_image_len
*
- * Purpose: Return the on disk image size of the global heap to the
+ * Purpose: Return the on disk image size of the global heap to the
* metadata cache via the image_len.
*
* Return: Success: SUCCEED
@@ -418,7 +418,7 @@ H5HG__cache_heap_image_len(const void *_thing, size_t *image_len)
/*-------------------------------------------------------------------------
* Function: H5HG__cache_heap_serialize
*
- * Purpose: Given an appropriately sized buffer and an instance of
+ * Purpose: Given an appropriately sized buffer and an instance of
* H5HG_heap_t, serialize the global heap for writing to file,
* and copy the serialized version into the buffer.
*
diff --git a/src/H5HLcache.c b/src/H5HLcache.c
index 2841a4c..a8f1f45 100644
--- a/src/H5HLcache.c
+++ b/src/H5HLcache.c
@@ -76,16 +76,16 @@ static void *H5HL__cache_prefix_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5HL__cache_prefix_image_len(const void *thing, size_t *image_len);
static herr_t H5HL__cache_prefix_serialize(const H5F_t *f, void *image,
- size_t len, void *thing);
+ size_t len, void *thing);
static herr_t H5HL__cache_prefix_free_icr(void *thing);
/* Local heap data block */
static herr_t H5HL__cache_datablock_get_initial_load_size(void *udata, size_t *image_len);
static void *H5HL__cache_datablock_deserialize(const void *image, size_t len,
- void *udata, hbool_t *dirty);
+ void *udata, hbool_t *dirty);
static herr_t H5HL__cache_datablock_image_len(const void *thing, size_t *image_len);
static herr_t H5HL__cache_datablock_serialize(const H5F_t *f, void *image,
- size_t len, void *thing);
+ size_t len, void *thing);
static herr_t H5HL__cache_datablock_notify(H5C_notify_action_t action, void *_thing);
static herr_t H5HL__cache_datablock_free_icr(void *thing);
@@ -175,12 +175,12 @@ H5HL__hdr_deserialize( H5HL_t *heap, const uint8_t *image,
HDassert(udata);
/* Check magic number */
- if(HDmemcmp(image, H5HL_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ if(HDmemcmp(image, H5HL_MAGIC, (size_t)H5_SIZEOF_MAGIC))
HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "bad local heap signature")
image += H5_SIZEOF_MAGIC;
/* Version */
- if(H5HL_VERSION != *image++)
+ if(H5HL_VERSION != *image++)
HGOTO_ERROR(H5E_HEAP, H5E_VERSION, FAIL, "wrong version number in local heap")
/* Reserved */
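Reduced to a self-contained sketch, the signature-then-version pattern
used here looks like the following; the four-byte local heap signature is
"HEAP", and the version constant is treated as an assumption:

    #include <string.h>

    static int
    check_local_heap_header(const unsigned char *image)
    {
        static const char magic[4] = { 'H', 'E', 'A', 'P' };

        if(memcmp(image, magic, sizeof(magic)) != 0)
            return -1;                    /* bad signature */
        if(image[sizeof(magic)] != 0)     /* assumed H5HL_VERSION == 0 */
            return -1;                    /* wrong version */
        return 0;
    }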
@@ -322,7 +322,7 @@ H5HL__fl_serialize(const H5HL_t *heap)
/*-------------------------------------------------------------------------
* Function: H5HL__cache_prefix_get_initial_load_size()
*
- * Purpose: Return the initial size of the buffer the metadata cache should
+ * Purpose: Return the initial size of the buffer the metadata cache should
* load from file and pass to the deserialize routine.
*
* Return: Success: SUCCEED
@@ -351,7 +351,7 @@ H5HL__cache_prefix_get_initial_load_size(void H5_ATTR_UNUSED *_udata, size_t *im
/*-------------------------------------------------------------------------
* Function: H5HL__cache_prefix_get_final_load_size()
*
- * Purpose: Return the final size of the buffer the metadata cache should
+ * Purpose: Return the final size of the buffer the metadata cache should
* load from file and pass to the deserialize routine.
*
* Return: Success: SUCCEED
@@ -401,7 +401,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5HL__cache_prefix_deserialize
*
- * Purpose: Given a buffer containing the on disk image of the local
+ * Purpose: Given a buffer containing the on disk image of the local
* heap prefix, deserialize it, load its contents into a newly allocated
* instance of H5HL_prfx_t, and return a pointer to the new instance.
*
@@ -471,8 +471,8 @@ H5HL__cache_prefix_deserialize(const void *_image, size_t len, void *_udata,
HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, NULL, "can't initialize free list")
} /* end if */
else
- /* Note that the heap should _NOT_ be a single
- * object in the cache
+ /* Note that the heap should _NOT_ be a single
+ * object in the cache
*/
heap->single_cache_obj = FALSE;
} /* end if */
@@ -500,7 +500,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5HL__cache_prefix_image_len
*
- * Purpose: Return the on disk image size of a local heap prefix to the
+ * Purpose: Return the on disk image size of a local heap prefix to the
* metadata cache via the image_len.
*
* Return: Success: SUCCEED
@@ -527,8 +527,8 @@ H5HL__cache_prefix_image_len(const void *_thing, size_t *image_len)
/* Set the prefix's size */
*image_len = prfx->heap->prfx_size;
- /* If the heap is stored as a single object, add in the
- * data block size also
+ /* If the heap is stored as a single object, add in the
+ * data block size also
*/
if(prfx->heap->single_cache_obj)
*image_len += prfx->heap->dblk_size;
@@ -540,9 +540,9 @@ H5HL__cache_prefix_image_len(const void *_thing, size_t *image_len)
/*-------------------------------------------------------------------------
* Function: H5HL__cache_prefix_serialize
*
- * Purpose: Given a pointer to an instance of H5HL_prfx_t and an
- * appropriately sized buffer, serialize the contents of the
- * instance for writing to disk, and copy the serialized data
+ * Purpose: Given a pointer to an instance of H5HL_prfx_t and an
+ * appropriately sized buffer, serialize the contents of the
+ * instance for writing to disk, and copy the serialized data
* into the buffer.
*
* Return: Success: SUCCEED
@@ -602,9 +602,9 @@ H5HL__cache_prefix_serialize(const H5F_t *f, void *_image, size_t len,
if((size_t)(image - (uint8_t *)_image) < heap->prfx_size) {
size_t gap; /* Size of gap between prefix and data block */
- /* Set image to the start of the data block. This is necessary
- * because there may be a gap between the used portion of
- * the prefix and the data block due to alignment constraints.
+ /* Set image to the start of the data block. This is necessary
+ * because there may be a gap between the used portion of
+ * the prefix and the data block due to alignment constraints.
*/
gap = heap->prfx_size - (size_t)(image - (uint8_t *)_image);
HDmemset(image, 0, gap);
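Reduced to its essentials, the gap handling above looks like this
(self-contained sketch, hypothetical names):

    #include <stdint.h>
    #include <string.h>

    static uint8_t *
    skip_prefix_gap(uint8_t *image, const uint8_t *image_start, size_t prfx_size)
    {
        size_t used = (size_t)(image - image_start);

        if(used < prfx_size) {
            memset(image, 0, prfx_size - used);  /* zero the alignment gap */
            image += prfx_size - used;           /* data block starts here */
        }
        return image;
    }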
@@ -635,11 +635,11 @@ H5HL__cache_prefix_serialize(const H5F_t *f, void *_image, size_t len,
/*-------------------------------------------------------------------------
* Function: H5HL__cache_prefix_free_icr
*
- * Purpose: Free the supplied in core representation of a local heap
+ * Purpose: Free the supplied in core representation of a local heap
* prefix.
*
- * Note that this function handles the partially initialized prefix
- * from a failed speculative load attempt. See comments below for
+ * Note that this function handles the partially initialized prefix
+ * from a failed speculative load attempt. See comments below for
* details.
*
* Note: The metadata cache sets the object's cache_info.magic to
@@ -680,7 +680,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5HL__cache_datablock_get_initial_load_size()
*
- * Purpose: Tell the metadata cache how large a buffer to read from
+ * Purpose: Tell the metadata cache how large a buffer to read from
 * file when loading a datablock. In this case, we simply look up
* the correct value in the user data, and return it in *image_len.
*
@@ -714,7 +714,7 @@ H5HL__cache_datablock_get_initial_load_size(void *_udata, size_t *image_len)
/*-------------------------------------------------------------------------
* Function: H5HL__cache_datablock_deserialize
*
- * Purpose: Given a buffer containing the on disk image of a local
+ * Purpose: Given a buffer containing the on disk image of a local
* heap data block, deserialize it, load its contents into a newly allocated
* instance of H5HL_dblk_t, and return a pointer to the new instance.
*
@@ -871,7 +871,7 @@ H5HL__cache_datablock_serialize(const H5F_t *f, void *image, size_t len,
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5HL__cache_datablock_notify(H5C_notify_action_t action, void *_thing)
{
H5HL_dblk_t *dblk = (H5HL_dblk_t *)_thing; /* Pointer to the local heap data block */
diff --git a/src/H5HLdblk.c b/src/H5HLdblk.c
index 762a213..c3fcffe 100644
--- a/src/H5HLdblk.c
+++ b/src/H5HLdblk.c
@@ -242,7 +242,7 @@ H5HL__dblk_realloc(H5F_t *f, H5HL_t *heap, size_t new_heap_size))
/* Insert data block into cache (pinned) */
if(FAIL == H5AC_insert_entry(f, H5AC_LHEAP_DBLK, new_addr, dblk, H5AC__PIN_ENTRY_FLAG))
H5E_THROW(H5E_CANTINIT, "unable to cache local heap data block");
-
+
dblk = NULL;
/* Reset 'single cache object' flag */
diff --git a/src/H5HLint.c b/src/H5HLint.c
index e625f3d..cc3e3ea 100644
--- a/src/H5HLint.c
+++ b/src/H5HLint.c
@@ -213,7 +213,7 @@ CATCH
if(NULL != (fl = H5FL_FREE(H5HL_free_t, fl)))
H5E_THROW(H5E_CANTFREE, "unable to free local heap free list");
} /* end while */
-
+
if(NULL != (heap = H5FL_FREE(H5HL_t, heap)))
H5E_THROW(H5E_CANTFREE, "unable to free local heap");
diff --git a/src/H5L.c b/src/H5L.c
index 43c1593..00fb221 100644
--- a/src/H5L.c
+++ b/src/H5L.c
@@ -317,7 +317,7 @@ H5Lmove(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id,
H5CX_set_lcpl(lcpl_id);
/* Verify access property list and set up collective metadata if appropriate */
- if(H5CX_set_apl(&lapl_id, H5P_CLS_LACC,
+ if(H5CX_set_apl(&lapl_id, H5P_CLS_LACC,
((src_loc_id != H5L_SAME_LOC) ? src_loc_id : dst_loc_id), TRUE) < 0)
HGOTO_ERROR(H5E_LINK, H5E_CANTSET, FAIL, "can't set access property list info")
@@ -386,7 +386,7 @@ H5Lcopy(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id,
H5CX_set_lcpl(lcpl_id);
/* Verify access property list and set up collective metadata if appropriate */
- if(H5CX_set_apl(&lapl_id, H5P_CLS_LACC,
+ if(H5CX_set_apl(&lapl_id, H5P_CLS_LACC,
((src_loc_id != H5L_SAME_LOC) ? src_loc_id : dst_loc_id), TRUE) < 0)
HGOTO_ERROR(H5E_LINK, H5E_CANTSET, FAIL, "can't set access property list info")
diff --git a/src/H5MF.c b/src/H5MF.c
index 6d28ee8..dac713b 100644
--- a/src/H5MF.c
+++ b/src/H5MF.c
@@ -258,7 +258,7 @@ H5MF__alloc_to_fs_type(H5F_t *f, H5FD_mem_t alloc_type, hsize_t size, H5F_mem_pa
if(size >= f->shared->fs_page_size) {
if(H5F_HAS_FEATURE(f, H5FD_FEAT_PAGED_AGGR)) { /* multi or split driver */
/* For non-contiguous address space, map to large size free-space manager for each alloc_type */
- if(H5FD_MEM_DEFAULT == f->shared->fs_type_map[alloc_type])
+ if(H5FD_MEM_DEFAULT == f->shared->fs_type_map[alloc_type])
*fs_type = (H5F_mem_page_t) (alloc_type + (H5FD_MEM_NTYPES - 1));
else
*fs_type = (H5F_mem_page_t) (f->shared->fs_type_map[alloc_type] + (H5FD_MEM_NTYPES - 1));
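Numerically, this mapping shifts each allocation type into the matching
large-manager slot: assuming H5FD_MEM_NTYPES is 7, type t maps to slot
t + 6 (a sketch, with the constant treated as an assumption):

    enum { MEM_NTYPES = 7 };   /* assumed value of H5FD_MEM_NTYPES */

    static int
    to_large_fs_slot(int alloc_type)
    {
        return alloc_type + (MEM_NTYPES - 1);   /* e.g. 3 -> 9 */
    }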
@@ -909,7 +909,7 @@ HDfprintf(stderr, "%s: alloc_type = %u, size = %Hu\n", FUNC, (unsigned)alloc_typ
H5MF__alloc_to_fs_type(f, alloc_type, size, &ptype);
switch(ptype) {
- case H5F_MEM_PAGE_GENERIC:
+ case H5F_MEM_PAGE_GENERIC:
case H5F_MEM_PAGE_LARGE_BTREE:
case H5F_MEM_PAGE_LARGE_DRAW:
case H5F_MEM_PAGE_LARGE_GHEAP:
@@ -951,7 +951,7 @@ HDfprintf(stderr, "%s: alloc_type = %u, size = %Hu\n", FUNC, (unsigned)alloc_typ
}
break;
- case H5F_MEM_PAGE_META:
+ case H5F_MEM_PAGE_META:
case H5F_MEM_PAGE_DRAW:
case H5F_MEM_PAGE_BTREE:
case H5F_MEM_PAGE_GHEAP:
@@ -979,13 +979,13 @@ HDfprintf(stderr, "%s: alloc_type = %u, size = %Hu\n", FUNC, (unsigned)alloc_typ
node = NULL;
- /* Insert the new page into the Page Buffer list of new pages so
+ /* Insert the new page into the Page Buffer list of new pages so
we don't read an empty page from disk */
if(f->shared->page_buf != NULL && H5PB_add_new_page(f, alloc_type, new_page) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINSERT, HADDR_UNDEF, "can't add new page to Page Buffer new page list")
ret_value = new_page;
- }
+ }
break;
case H5F_MEM_PAGE_NTYPES:
@@ -1315,7 +1315,7 @@ HDfprintf(stderr, "%s: Entering: alloc_type = %u, addr = %a, size = %Hu, extra_r
end = addr + size;
/* For paged aggregation:
- * To extend a small block: can only extend if not crossing page boundary
+ * To extend a small block: can only extend if not crossing page boundary
* To extend a large block at EOA: calculate in advance mis-aligned fragment so EOA will still end at page boundary
*/
if(H5F_PAGED_AGGR(f)) {
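The small-block half of that rule amounts to the following check (a
self-contained sketch): the extended block must still fit inside a single
page.

    static int
    small_block_fits_page(unsigned long addr, unsigned long new_size,
                          unsigned long page_size)
    {
        return addr / page_size == (addr + new_size - 1) / page_size;
    }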
@@ -1666,18 +1666,18 @@ HDfprintf(stderr, "%s: Entering\n", FUNC);
/* check args */
HDassert(f);
- * If there have been no file space allocations / deallocations so
- * far, we must call H5MF_tidy_self_referential_fsm_hack() to float
+ * If there have been no file space allocations / deallocations so
+ * far, we must call H5MF_tidy_self_referential_fsm_hack() to float
* all self referential FSMs and release file space allocated to
* them. Otherwise, the function will be called after the format
* conversion, and will become very confused.
*
* The situation is further complicated if a cache image exists
* and had not yet been loaded into the metadata cache. In this
- * case, call H5AC_force_cache_image_load() instead of
+ * case, call H5AC_force_cache_image_load() instead of
* H5MF_tidy_self_referential_fsm_hack(). H5AC_force_cache_image_load()
- * will load the cache image, and then call
- * H5MF_tidy_self_referential_fsm_hack() to discard the cache image
+ * will load the cache image, and then call
+ * H5MF_tidy_self_referential_fsm_hack() to discard the cache image
* block.
*/
if(f->shared->first_alloc_dealloc) {
@@ -1701,8 +1701,8 @@ HDfprintf(stderr, "%s: Entering\n", FUNC);
if(H5F_PAGED_AGGR(f)) {
H5F_mem_page_t ptype; /* Memory type for iteration */
- /* Iterate over all the free space types that have managers and
- * get each free list's space
+ /* Iterate over all the free space types that have managers and
+ * get each free list's space
*/
for(ptype = H5F_MEM_PAGE_META; ptype < H5F_MEM_PAGE_NTYPES; ptype++) {
/* Test to see if we need to switch rings -- do so if required */
@@ -1723,8 +1723,8 @@ HDfprintf(stderr, "%s: Entering\n", FUNC);
else {
H5FD_mem_t type; /* Memory type for iteration */
- /* Iterate over all the free space types that have managers and
- * get each free list's space
+ /* Iterate over all the free space types that have managers and
+ * get each free list's space
*/
for(type = H5FD_MEM_DEFAULT; type < H5FD_MEM_NTYPES; type++) {
/* Test to see if we need to switch rings -- do so if required */
@@ -1815,9 +1815,9 @@ HDfprintf(stderr, "%s: Entering\n", FUNC);
*/
HDassert(H5F_addr_defined(f->shared->sblock->ext_addr));
- /* file space for all non-empty free space managers should be
+ /* file space for all non-empty free space managers should be
* allocated at this point, and these free space managers should
- * be written to file and thus their headers and section info
+ * be written to file and thus their headers and section info
* entries in the metadata cache should be clean.
*/
@@ -1847,7 +1847,7 @@ HDfprintf(stderr, "%s: Entering\n", FUNC);
/* Test to see if we need to switch rings -- do so if required */
if(H5MF__fsm_type_is_self_referential(f, (H5F_mem_page_t)type))
needed_ring = H5AC_RING_MDFSM;
- else
+ else
needed_ring = H5AC_RING_RDFSM;
if(needed_ring != curr_ring) {
@@ -1888,8 +1888,8 @@ HDfprintf(stderr, "%s: Entering\n", FUNC);
if(HADDR_UNDEF == (final_eoa = H5FD_get_eoa(f->shared->lf, H5FD_MEM_DEFAULT)) )
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get file size")
- /* f->shared->eoa_post_fsm_fsalloc is undefined if there has
- * been no file space allocation or deallocation since file
+ /* f->shared->eoa_post_fsm_fsalloc is undefined if there has
+ * been no file space allocation or deallocation since file
* open.
*/
HDassert((f->shared->first_alloc_dealloc) || (final_eoa == f->shared->eoa_post_fsm_fsalloc));
@@ -1995,7 +1995,7 @@ HDfprintf(stderr, "%s: Entering\n", FUNC);
/* gather data for the free space manager superblock extension message.
* Only need addresses of FSMs and eoa prior to allocation of
- * file space for the self referential free space managers. Other
+ * file space for the self referential free space managers. Other
* data was gathered above.
*/
for(ptype = H5F_MEM_PAGE_META; ptype < H5F_MEM_PAGE_NTYPES; ptype++)
@@ -2049,13 +2049,13 @@ HDfprintf(stderr, "%s: Entering\n", FUNC);
* been no file space allocation or deallocation since file
* open.
*
- * If there is a cache image in the file at file open,
- * f->shared->first_alloc_dealloc will always be FALSE unless
+ * If there is a cache image in the file at file open,
+ * f->shared->first_alloc_dealloc will always be FALSE unless
* the file is opened R/O, as otherwise, the image will have been
* read and discarded by this point.
*
- * If a cache image was created on file close, the actual EOA
- * should be in f->shared->eoa_post_mdci_fsalloc. Note that in
+ * If a cache image was created on file close, the actual EOA
+ * should be in f->shared->eoa_post_mdci_fsalloc. Note that in
* this case, it is conceivable that f->shared->first_alloc_dealloc
* will still be TRUE, as the cache image is allocated directly from
* the file driver layer. However, as this possibility seems remote,
@@ -2067,8 +2067,8 @@ HDfprintf(stderr, "%s: Entering\n", FUNC);
(final_eoa == f->shared->eoa_post_mdci_fsalloc)));
} /* end if */
else {
- /* Iterate over all the free space types that have managers
- * and get each free list's space
+ /* Iterate over all the free space types that have managers
+ * and get each free list's space
*/
for(ptype = H5F_MEM_PAGE_META; ptype < H5F_MEM_PAGE_NTYPES; ptype++)
if(H5MF__close_delete_fstype(f, ptype) < 0)
@@ -2381,16 +2381,16 @@ H5MF_get_free_sections(H5F_t *f, H5FD_mem_t type, size_t nsects, H5F_sect_info_t
HDassert(f->shared);
HDassert(f->shared->lf);
- /* H5MF_tidy_self_referential_fsm_hack() will fail if any self
+ /* H5MF_tidy_self_referential_fsm_hack() will fail if any self
* referential FSM is opened prior to the call to it. Thus call
* it here if necessary and if it hasn't been called already.
*
* The situation is further complicated if a cache image exists
* and had not yet been loaded into the metadata cache. In this
- * case, call H5AC_force_cache_image_load() instead of
+ * case, call H5AC_force_cache_image_load() instead of
* H5MF_tidy_self_referential_fsm_hack(). H5AC_force_cache_image_load()
- * will load the cache image, and then call
- * H5MF_tidy_self_referential_fsm_hack() to discard the cache image
+ * will load the cache image, and then call
+ * H5MF_tidy_self_referential_fsm_hack() to discard the cache image
* block.
*/
if(f->shared->first_alloc_dealloc) {
@@ -2554,50 +2554,50 @@ done:
/*-------------------------------------------------------------------------
* Function: H5MF_settle_raw_data_fsm()
*
- * Purpose: Handle any tasks required before the metadata cache
+ * Purpose: Handle any tasks required before the metadata cache
* can serialize or flush the raw data free space manager
- * and any metadata free space managers that reside in the
+ * and any metadata free space managers that reside in the
* raw data free space manager ring.
*
- * Specifically, this means any metadata managers that DON'T
- * handle space allocation for free space manager header or
- * section info will reside in the raw data free space manager
+ * Specifically, this means any metadata managers that DON'T
+ * handle space allocation for free space manager header or
+ * section info will reside in the raw data free space manager
* ring.
*
- * In the absence of page allocation, there is at most one
+ * In the absence of page allocation, there is at most one
* free space manager per memory type defined in H5F_mem_t.
- * Of these, the one that allocates H5FD_MEM_DRAW will
+ * Of these, the one that allocates H5FD_MEM_DRAW will
* always reside in the raw data free space manager ring.
- * If there is more than one metadata free space manager,
- * all that don't handle H5FD_MEM_FSPACE_HDR or
- * H5FD_MEM_FSPACE_SINFO (which map to H5FD_MEM_OHDR and
- * H5FD_MEM_LHEAP respectively) will reside in the raw
+ * If there is more than one metadata free space manager,
+ * all that don't handle H5FD_MEM_FSPACE_HDR or
+ * H5FD_MEM_FSPACE_SINFO (which map to H5FD_MEM_OHDR and
+ * H5FD_MEM_LHEAP respectively) will reside in the raw
 * data free space manager ring as well.
*
- * With page allocation, the situation is conceptually
+ * With page allocation, the situation is conceptually
* identical, but more complex in practice.
*
- * In the worst case (multi file driver) page allocation
- * can result in two free space managers for each memory
+ * In the worst case (multi file driver) page allocation
+ * can result in two free space managers for each memory
 * type -- one for small (less than or equal to one page)
* allocations, and one for large (greater than one page)
* allocations.
*
* In the more common one file case, page allocation will
- * result in a total of three free space managers -- one for
- * small (<= one page) raw data allocations, one for small
- * metadata allocations (i.e., all memory types other than
- * H5FD_MEM_DRAW), and one for all large (> one page)
+ * result in a total of three free space managers -- one for
+ * small (<= one page) raw data allocations, one for small
+ * metadata allocations (i.e., all memory types other than
+ * H5FD_MEM_DRAW), and one for all large (> one page)
* allocations.
*
* Despite these complications, the solution is the same in
- * the page allocation case -- free space managers (be they
- * small data or large) are assigned to the raw data free
+ * the page allocation case -- free space managers (be they
+ * small data or large) are assigned to the raw data free
* space manager ring if they don't allocate file space for
- * free space managers. Note that in the one file case, the
+ * free space managers. Note that in the one file case, the
* large free space manager must be assigned to the metadata
- * free space manager ring, as it both allocates pages for
- * the metadata free space manager, and allocates space for
+ * free space manager ring, as it both allocates pages for
+ * the metadata free space manager, and allocates space for
* large (> 1 page) metadata cache entries.
*
* At present, the task list for this routine is:
@@ -2607,14 +2607,14 @@ done:
* a) Free both aggregators. Space not at EOA will be
* added to the appropriate free space manager.
*
- * The raw data aggregator should not be restarted
+ * The raw data aggregator should not be restarted
* after this point. It is possible that the metadata
* aggregator will be.
*
* b) Free all file space currently allocated to free
* space managers.
*
- * c) Delete the free space manager superblock
+ * c) Delete the free space manager superblock
* extension message if allocated.
*
* This done, reduce the EOA by moving it to just before
@@ -2622,23 +2622,23 @@ done:
*
* 2) Ensure that space is allocated for the free space
* manager superblock extension message. Must do this
- * now, before reallocating file space for free space
+ * now, before reallocating file space for free space
* managers, as it is possible that this allocation may
* grab the last section in a FSM -- making it unnecessary
* to re-allocate file space for it.
*
* 3) Scan all free space managers not involved in allocating
* space for free space managers. For each such free space
- * manager, test to see if it contains free space. If
+ * manager, test to see if it contains free space. If
* it does, allocate file space for its header and section
- * data. If it contains no free space, leave it without
- * allocated file space as there is no need to save it to
+ * data. If it contains no free space, leave it without
+ * allocated file space as there is no need to save it to
* file.
*
* Note that all free space managers in this class should
- * see no further space allocations / deallocations as
- * at this point, all raw data allocations should be
- * finalized, as should all metadata allocations not
+ * see no further space allocations / deallocations as
+ * at this point, all raw data allocations should be
+ * finalized, as should all metadata allocations not
* involving free space managers.
*
* We will allocate space for free space managers involved
@@ -2673,7 +2673,7 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hbool_t *fsm_settled)
HDassert(f->shared);
HDassert(fsm_settled);
- /* Only need to settle things if we are persisting the free space info
+ /* Only need to settle things if we are persisting the free space info
* and allocation/deallocation has occurred.
*/
if(f->shared->fs_persist && !f->shared->first_alloc_dealloc) {
@@ -2707,7 +2707,7 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hbool_t *fsm_settled)
* Note that while the raw data aggregator should not be restarted during
* the close process, this need not be the case for the metadata aggregator.
*
- * Note also that the aggregators will not exist if page aggregation
+ * Note also that the aggregators will not exist if page aggregation
* is enabled -- skip this if so.
*/
/* Vailin -- is this correct? */
@@ -2743,10 +2743,10 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hbool_t *fsm_settled)
* referential nature of the problem. These FSMs are dealt with in
* H5MF_settle_meta_data_fsm().
*
- * Since paged allocation may be enabled, there may be up to two
+ * Since paged allocation may be enabled, there may be up to two
* free space managers per memory type -- one for small and one for
* large allocation. Hence we must loop over the memory types twice
- * setting the allocation size accordingly if paged allocation is
+ * setting the allocation size accordingly if paged allocation is
* enabled.
*/
for(pass_count = 0; pass_count <= 1; pass_count++) {
@@ -2807,7 +2807,7 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hbool_t *fsm_settled)
/* Check if the free space manager has space in the file */
if(H5F_addr_defined(fs_stat.addr) || H5F_addr_defined(fs_stat.sect_addr)) {
- /* Delete the free space manager in the file. Will
+ /* Delete the free space manager in the file. Will
* reallocate later if the free space manager contains
* any free space.
*/
@@ -2832,7 +2832,7 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hbool_t *fsm_settled)
* extension messages will choke if the target message is
* unexpectedly either absent or present.
*
- * Update: This is probably unnecessary, as I gather that the
+ * Update: This is probably unnecessary, as I gather that the
* file space manager info message is guaranteed to exist.
* Leave it in for now, but consider removing it.
*/
@@ -2935,9 +2935,9 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hbool_t *fsm_settled)
fsm_visited[fsm_type] = TRUE;
if(f->shared->fs_man[fsm_type]) {
- /* Only allocate file space if the target free space manager
- * doesn't allocate file space for free space managers. Note
- * that this is also the deciding factor as to whether a FSM
+ /* Only allocate file space if the target free space manager
+ * doesn't allocate file space for free space managers. Note
+ * that this is also the deciding factor as to whether a FSM
 * is in the raw data FSM ring.
*/
if(!H5MF__fsm_type_is_self_referential(f, fsm_type)) {
@@ -3018,15 +3018,15 @@ done:
/*-------------------------------------------------------------------------
* Function: H5MF_settle_meta_data_fsm()
*
- * Purpose: If the free space manager is persistent, handle any tasks
- * required before the metadata cache can serialize or flush
- * the metadata free space manager(s) that handle file space
+ * Purpose: If the free space manager is persistent, handle any tasks
+ * required before the metadata cache can serialize or flush
+ * the metadata free space manager(s) that handle file space
* allocation for free space managers.
*
- * In most cases, there will be only one manager assigned
+ * In most cases, there will be only one manager assigned
 * to this role. However, since, for reasons unknown,
- * free space manager headers and section info blocks are
- * different classes of memory, it is possible that two free
+ * free space manager headers and section info blocks are
+ * different classes of memory, it is possible that two free
* space managers will be involved.
*
* On entry to this function, the raw data settle routine
@@ -3043,23 +3043,23 @@ done:
* 5) Re-created the free space manager superblock extension
* message.
*
- * 6) Reallocated file space for all non-empty free space
- * managers NOT involved in allocation of space for free
+ * 6) Reallocated file space for all non-empty free space
+ * managers NOT involved in allocation of space for free
* space managers.
*
* Note that these free space managers (if not empty) should
* have been written to file by this point, and that no
- * further space allocations involving them should take
+ * further space allocations involving them should take
* place during file close.
*
* On entry to this routine, the free space manager(s) involved
* in allocation of file space for free space managers should
- * still be floating. (i.e. should not have any file space
+ * still be floating. (i.e. should not have any file space
* allocated to them.)
*
- * Similarly, the raw data aggregator should not have been
- * restarted. Note that it is probable that reallocation of
- * space in 5) and 6) above will have re-started the metadata
+ * Similarly, the raw data aggregator should not have been
+ * restarted. Note that it is probable that reallocation of
+ * space in 5) and 6) above will have re-started the metadata
* aggregator.
*
*
@@ -3071,43 +3071,43 @@ done:
* 2) Free the aggregators.
*
* 3) Reduce the EOA to the extent possible, and make note
- * of the resulting value. This value will be stored
+ * of the resulting value. This value will be stored
* in the fsinfo superblock extension message and be used
* in the subsequent file open.
*
* 4) Re-allocate space for any free space manager(s) that:
*
- * a) are involved in allocation of space for free space
- * managers, and
+ * a) are involved in allocation of space for free space
+ * managers, and
*
* b) contain free space.
*
- * It is possible that we could allocate space for one
- * of these free space manager(s) only to have the allocation
- * result in the free space manager being empty and thus
+ * It is possible that we could allocate space for one
+ * of these free space manager(s) only to have the allocation
+ * result in the free space manager being empty and thus
* obliging us to free the space again. Thus there is the
* potential for an infinite loop if we want to avoid saving
* empty free space managers.
*
- * Similarly, it is possible that we could allocate space
- * for a section info block, only to discover that this
- * allocation has changed the size of the section info --
+ * Similarly, it is possible that we could allocate space
+ * for a section info block, only to discover that this
+ * allocation has changed the size of the section info --
* forcing us to deallocate and start the loop over again.
*
* To avoid this, simply allocate file space for these
- * FSM(s) directly from the VFD layer if allocation is
+ * FSM(s) directly from the VFD layer if allocation is
* indicated. This avoids the issue by bypassing the FSMs
- * in this case.
+ * in this case.
*
- * Note that this may increase the size of the file needlessly.
- * A better solution would be to modify the FSM code to
+ * Note that this may increase the size of the file needlessly.
+ * A better solution would be to modify the FSM code to
* save empty FSMs to file, and to allow section info blocks
- * to be oversized. However, given that the FSM code is
- * also used by the fractal heaps, and that we are under
- * severe time pressure at the moment, the above brute
- * force solution is attractive.
+ * to be oversized. However, given that the FSM code is
+ * also used by the fractal heaps, and that we are under
+ * severe time pressure at the moment, the above brute
+ * force solution is attractive.
*
- * 5) Make note of the EOA -- used for sanity checking on
+ * 5) Make note of the EOA -- used for sanity checking on
* FSM shutdown.
*
* Return: SUCCEED/FAIL
@@ -3142,7 +3142,7 @@ H5MF_settle_meta_data_fsm(H5F_t *f, hbool_t *fsm_settled)
HDassert(f->shared);
HDassert(fsm_settled);
- /* Only need to settle things if we are persisting the free space info
+ /* Only need to settle things if we are persisting the free space info
* and allocation/deallocation has occurred.
*/
if(f->shared->fs_persist && !f->shared->first_alloc_dealloc) {
@@ -3227,8 +3227,8 @@ H5MF_settle_meta_data_fsm(H5F_t *f, hbool_t *fsm_settled)
HDassert(fs_stat.alloc_sect_size == 0);
} /* end if */
- /* Verify that lg_sinfo_fspace is floating if it
- * exists and is distinct
+ /* Verify that lg_sinfo_fspace is floating if it
+ * exists and is distinct
*/
if((lg_sinfo_fspace) && (lg_hdr_fspace != lg_sinfo_fspace)) {
/* Query free space manager info for this type */
@@ -3247,7 +3247,7 @@ H5MF_settle_meta_data_fsm(H5F_t *f, hbool_t *fsm_settled)
* H5MF_free_aggrs() call. Note that the raw data aggregator must
* have already been freed. Sanity checks for this?
*
- * Note that the aggregators will not exist if paged aggregation
+ * Note that the aggregators will not exist if paged aggregation
* is enabled -- don't attempt to free if this is the case.
*/
/* Vailin -- is this correct? */
@@ -3259,38 +3259,38 @@ H5MF_settle_meta_data_fsm(H5F_t *f, hbool_t *fsm_settled)
if(H5MF__close_shrink_eoa(f) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSHRINK, FAIL, "can't shrink eoa")
- /* At this point, the EOA should be set to a value that contains
+ /* At this point, the EOA should be set to a value that contains
* the allocation for all user data, all non self referential FSMs,
* the superblock and all superblock extension messages.
- *
- * Make note of the current EOA. We will store this value in the
+ *
+ * Make note of the current EOA. We will store this value in the
* free space manager superblock extension message. Since space for
* everything other than the self referential FSMs (and possibly the
- * cache image) has been allocated at this point, this allows us
- * to float the self referential FSMs on the first file space allocation /
+ * cache image) has been allocated at this point, this allows us
+ * to float the self referential FSMs on the first file space allocation /
* deallocation and then set the EOA to this value before we handle
- * the allocation / deallocation. (If a cache image exists, the
+ * the allocation / deallocation. (If a cache image exists, the
* first allocation / deallocation will be the deallocation of space
- * for the cache image).
+ * for the cache image).
*
- * WARNING: This approach to settling the self referential free space
- * managers and allocating space for them in the file will
- * not work as currently implemented with the split and
- * multi file drivers, as the self referential free space
- * manager header and section info can be stored in up to
- * two different files -- requiring that up to two EOA's
- * be stored in the free space managers superblock
- * extension message.
+ * WARNING: This approach to settling the self referential free space
+ * managers and allocating space for them in the file will
+ * not work as currently implemented with the split and
+ * multi file drivers, as the self referential free space
+ * manager header and section info can be stored in up to
+ * two different files -- requiring that up to two EOA's
+ * be stored in the free space managers superblock
+ * extension message.
*
- * As of this writing, we are solving this problem by
- * simply not supporting persistent FSMs with the split
+ * As of this writing, we are solving this problem by
+ * simply not supporting persistent FSMs with the split
* and multi file drivers.
*
- * Current plans are to do away with the multi file
+ * Current plans are to do away with the multi file
* driver, so this should be a non-issue in this case.
*
- * We should be able to support the split file driver
- * without a file format change. However, the code to
+ * We should be able to support the split file driver
+ * without a file format change. However, the code to
* do so does not exist at present.
*/
if(HADDR_UNDEF == (eoa_pre_fsm_fsalloc = H5FD_get_eoa(f->shared->lf, H5FD_MEM_DEFAULT)))
@@ -3364,7 +3364,7 @@ H5MF_settle_meta_data_fsm(H5F_t *f, hbool_t *fsm_settled)
} /* end if */
/* Get the eoa after allocation of file space for the self referential
- * free space managers. Assuming no cache image, this should be the
+ * free space managers. Assuming no cache image, this should be the
* final EOA of the file.
*/
if(HADDR_UNDEF == (eoa_post_fsm_fsalloc = H5FD_get_eoa(f->shared->lf, H5FD_MEM_DEFAULT)))
@@ -3373,7 +3373,7 @@ H5MF_settle_meta_data_fsm(H5F_t *f, hbool_t *fsm_settled)
/* All free space managers should have file space allocated for them
* now, and should see no further allocations / deallocations. Store
* the pre and post file space allocation for self referential FSMs EOA
- * for use when we actually write the free space manager superblock
+ * for use when we actually write the free space manager superblock
* extension message.
*/
f->shared->eoa_pre_fsm_fsalloc = eoa_pre_fsm_fsalloc;
@@ -3433,7 +3433,7 @@ H5MF__fsm_type_is_self_referential(H5F_t *f, H5F_mem_page_t fsm_type)
|| (fsm_type == lg_fshdr_fsm) || (fsm_type == lg_fssinfo_fsm);
} /* end if */
else {
- /* In principle, fsm_type should always be less than
+ /* In principle, fsm_type should always be less than
* H5F_MEM_PAGE_LARGE_SUPER whenever paged aggregation
* is not enabled. However, since there is code that does
* not observe this principle, force the result to FALSE if
@@ -3504,61 +3504,61 @@ H5MF__fsm_is_self_referential(H5F_t *f, H5FS_t *fspace)
*
* Purpose: As discussed in the comments of the settle routines above,
* the existence of self referential free space managers
- * as currently implemented creates the possibility of
+ * as currently implemented creates the possibility of
* infinite loops at file close.
*
* As a hack to avoid this, we have added code to settle
- * self referential free space managers, and then allocate
+ * self referential free space managers, and then allocate
* space for them directly from the file driver.
*
- * To avoid dropping ever increasing amounts of file space
+ * To avoid dropping ever increasing amounts of file space
* on the floor with each subsequent file close/open cycle,
* we need to clean this up on file open. To avoid this,
* this function is called on the first file space allocation
* or deallocation after file open to float the self referential
- * free space managers and reduce the EOA to the value it
- * had before the direct allocation of space for the self
+ * free space managers and reduce the EOA to the value it
+ * had before the direct allocation of space for the self
* referential free space managers.
*
* The function proceeds as follows:
*
- * 1) Verify that f->shared->first_alloc_dealloc is TRUE,
+ * 1) Verify that f->shared->first_alloc_dealloc is TRUE,
* and then set it to FALSE.
*
* 2) Get the current EOA. Verify that it is greater than
- * or equal to f->shared->eoa_pre_fsm_fsalloc. If the
+ * or equal to f->shared->eoa_pre_fsm_fsalloc. If the
* current eoa is equal to f->shared->eoa_pre_fsm_fsalloc,
* no self referential FSMs were stored, and we are done.
*
- * NOTE: This will have to be reworked somewhat for
+ * NOTE: This will have to be reworked somewhat for
* cache image.
*
* 3) Load the self referential FSMs. In passing verify that
- * the lowest address of a FSM header is equal to
+ * the lowest address of a FSM header is equal to
* f->shared->eoa_pre_fsm_fsalloc.
*
- * Note that we don't have to use any special I/O for
- * this -- we can use the regular I/O methods even if
+ * Note that we don't have to use any special I/O for
+ * this -- we can use the regular I/O methods even if
* paged aggregation and page buffering is enabled.
*
* 4) Float the FSMs. Ensure that the file space is NOT
* released.
*
- * 5) Set EOA equal to f->shared->eoa_pre_fsm_fsalloc,
- * and then set f->shared->eoa_pre_fsm_fsalloc to
+ * 5) Set EOA equal to f->shared->eoa_pre_fsm_fsalloc,
+ * and then set f->shared->eoa_pre_fsm_fsalloc to
* HADDR_UNDEF.
*
- * If page buffering, verify that the new EOA is
- * on a page boundary, and expunge any pages in the
+ * If page buffering, verify that the new EOA is
+ * on a page boundary, and expunge any pages in the
* page buffer after the new EOA.
*
* Note that this function is also called from test code
- * when it is necessary to start up a self referential
- * free space manager prior to the first file space
- * allocation / deallocation. Failure to do so will
+ * when it is necessary to start up a self referential
+ * free space manager prior to the first file space
+ * allocation / deallocation. Failure to do so will
* result in assertion failures in this function on
* the first file space allocation / deallocation.
- *
+ *
* Return: SUCCEED/FAIL
*
* Programmer: John Mainzer
@@ -3589,24 +3589,24 @@ H5MF_tidy_self_referential_fsm_hack(H5F_t *f)
HDassert(f->shared->fs_persist);
HDassert(f->shared->first_alloc_dealloc);
- /* Set the ring type in the API context. Since we are only dealing with
+ /* Set the ring type in the API context. Since we are only dealing with
* self referential FSMs, we will only need H5AC_RING_MDFSM.
*/
H5AC_set_ring(H5AC_RING_MDFSM, &orig_ring);
- /* 1) Verify that f->shared->first_alloc_dealloc is TRUE,
- * and then set it to FALSE.
+ /* 1) Verify that f->shared->first_alloc_dealloc is TRUE,
+ * and then set it to FALSE.
*/
HDassert(f->shared->first_alloc_dealloc);
f->shared->first_alloc_dealloc = FALSE;
/* 2) Get the current EOA. Verify that it is greater than
- * or equal to f->shared->eoa_pre_fsm_fsalloc. If the
+ * or equal to f->shared->eoa_pre_fsm_fsalloc. If the
* current eoa is equal to f->shared->eoa_pre_fsm_fsalloc,
* no self referential FSMs were stored, and we are done.
*
- * NOTE: This will have to be reworked somewhat for
+ * NOTE: This will have to be reworked somewhat for
* cache image.
*/
if(HADDR_UNDEF == (eoa = H5FD_get_eoa(f->shared->lf, H5FD_MEM_DEFAULT)))
@@ -3618,11 +3618,11 @@ H5MF_tidy_self_referential_fsm_hack(H5F_t *f)
/* 3) Load the self referential FSMs. In passing verify that
- * the lowest address of a FSM header is equal to
+ * the lowest address of a FSM header is equal to
 * f->shared->eoa_pre_fsm_fsalloc.
*
- * Note that we don't have to use any special I/O for
- * this -- we can use the regular I/O methods even if
+ * Note that we don't have to use any special I/O for
+ * this -- we can use the regular I/O methods even if
* paged aggregation and page buffering is enabled.
*/
H5MF__alloc_to_fs_type(f, H5FD_MEM_FSPACE_HDR, (size_t)1, &sm_fshdr_fs_type);
@@ -3705,7 +3705,7 @@ H5MF_tidy_self_referential_fsm_hack(H5F_t *f)
} /* end if */
} /* end if */
HDassert(H5F_addr_eq(first_srfsm_hdr, f->shared->eoa_pre_fsm_fsalloc));
-
+
/* 4) Float the FSMs. Ensure that the file space is NOT released. */
if(f->shared->fs_man[sm_fshdr_fs_type]) {
/* Sanity check: Query free space manager info for this type */
@@ -3757,12 +3757,12 @@ H5MF_tidy_self_referential_fsm_hack(H5F_t *f)
} /* end if */
} /* end if */
- /* 5) Set EOA equal to f->shared->eoa_pre_fsm_fsalloc,
- * and then set f->shared->eoa_pre_fsm_fsalloc to
+ /* 5) Set EOA equal to f->shared->eoa_pre_fsm_fsalloc,
+ * and then set f->shared->eoa_pre_fsm_fsalloc to
* HADDR_UNDEF.
*
- * If page buffering, verify that the new EOA is
- * on a page boundary, and expunge any pages in the
+ * If page buffering, verify that the new EOA is
+ * on a page boundary, and expunge any pages in the
* page buffer after the new EOA.
*/
if(!H5F_PAGED_AGGR(f)) {
diff --git a/src/H5MFaggr.c b/src/H5MFaggr.c
index 3db7f73..0991c5c 100644
--- a/src/H5MFaggr.c
+++ b/src/H5MFaggr.c
@@ -391,7 +391,7 @@ HDfprintf(stderr, "%s: ret_value = %a\n", FUNC, ret_value);
*-------------------------------------------------------------------------
*/
htri_t
-H5MF__aggr_try_extend(H5F_t *f, H5F_blk_aggr_t *aggr,
+H5MF__aggr_try_extend(H5F_t *f, H5F_blk_aggr_t *aggr,
H5FD_mem_t type, haddr_t blk_end, hsize_t extra_requested)
{
htri_t ret_value = FALSE; /* Return value */
@@ -405,7 +405,7 @@ H5MF__aggr_try_extend(H5F_t *f, H5F_blk_aggr_t *aggr,
/* Check if this aggregator is active */
if(f->shared->feature_flags & aggr->feature_flag) {
- /*
+ /*
* If the block being tested adjoins the beginning of the aggregator
* block, check if the aggregator can accommodate the extension.
*/
@@ -429,7 +429,7 @@ H5MF__aggr_try_extend(H5F_t *f, H5F_blk_aggr_t *aggr,
/*
* If extra_requested is above percentage threshold:
* 1) "bubble" up the aggregator by aggr->alloc_size or extra_requested
- * 2) extend the block into the aggregator
+ * 2) extend the block into the aggregator
*/
else {
hsize_t extra = (extra_requested < aggr->alloc_size) ? aggr->alloc_size : extra_requested;
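
For reference, the adjacency test the surrounding comments describe reduces to an address comparison. A minimal sketch (toy_haddr_t stands in for HDF5's haddr_t; not the code in H5MF__aggr_try_extend itself):

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t toy_haddr_t;   /* stand-in for haddr_t */

/* A block ending at blk_end adjoins an aggregator starting at aggr_addr
 * exactly when the two addresses coincide; only then can the block be
 * extended into the aggregator.
 */
static bool
block_adjoins_aggregator(toy_haddr_t blk_end, toy_haddr_t aggr_addr)
{
    return blk_end == aggr_addr;
}
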
diff --git a/src/H5MFdbg.c b/src/H5MFdbg.c
index 46f66e9..65e393f 100644
--- a/src/H5MFdbg.c
+++ b/src/H5MFdbg.c
@@ -115,8 +115,8 @@ H5MF__sects_debug_cb(H5FS_section_info_t *_sect, void *_udata)
/* Print generic section information */
HDfprintf(udata->stream, "%*s%-*s %s\n", udata->indent, "", udata->fwidth,
"Section type:",
- (sect->sect_info.type == H5MF_FSPACE_SECT_SIMPLE ? "simple" :
- (sect->sect_info.type == H5MF_FSPACE_SECT_SMALL ? "small" :
+ (sect->sect_info.type == H5MF_FSPACE_SECT_SIMPLE ? "simple" :
+ (sect->sect_info.type == H5MF_FSPACE_SECT_SMALL ? "small" :
(sect->sect_info.type == H5MF_FSPACE_SECT_LARGE ? "large" : "unknown"))));
HDfprintf(udata->stream, "%*s%-*s %a\n", udata->indent, "", udata->fwidth,
"Section address:",
diff --git a/src/H5MFprivate.h b/src/H5MFprivate.h
index 58e5054..01e2356 100644
--- a/src/H5MFprivate.h
+++ b/src/H5MFprivate.h
@@ -76,9 +76,9 @@ H5_DLL htri_t H5MF_aggrs_try_shrink_eoa(H5F_t *f);
H5_DLL herr_t H5MF_settle_raw_data_fsm(H5F_t *f, hbool_t *fsm_settled);
H5_DLL herr_t H5MF_settle_meta_data_fsm(H5F_t *f, hbool_t *fsm_settled);
-/* This function has to be declared in H5MFprivate.h as it is needed
- * in our test code to allow us to manually start a self referential
- * free space manager prior to the first file space allocation /
+/* This function has to be declared in H5MFprivate.h as it is needed
+ * in our test code to allow us to manually start a self referential
+ * free space manager prior to the first file space allocation /
* deallocation without causing assertion failures on the first
* file space allocation / deallocation.
*/
diff --git a/src/H5MFsection.c b/src/H5MFsection.c
index 3ebc5c8..a373360 100644
--- a/src/H5MFsection.c
+++ b/src/H5MFsection.c
@@ -186,7 +186,7 @@ H5FS_section_class_t H5MF_FSPACE_SECT_CLS_LARGE[1] = {{
/* Declare a free list to manage the H5MF_free_section_t struct */
H5FL_DEFINE(H5MF_free_section_t);
-/*
+/*
* "simple/small/large" section callbacks
*/
@@ -371,7 +371,7 @@ done:
FUNC_LEAVE_NOAPI((H5FS_section_info_t *)ret_value)
} /* end H5MF__sect_split() */
-/*
+/*
* "simple" section callbacks
*/
@@ -615,7 +615,7 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5MF__sect_simple_shrink() */
-/*
+/*
* "small" section callbacks
*/
@@ -625,7 +625,7 @@ done:
*
 * Purpose: Perform actions on a small "meta" section before adding it to the free space manager:
* 1) Drop the section if it is at page end and its size <= page end threshold
- * 2) Adjust section size to include page end threshold if
+ * 2) Adjust section size to include page end threshold if
* (section size + threshold) is at page end
*
* Return: Success: non-negative
@@ -792,7 +792,7 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5MF__sect_small_merge() */
-/*
+/*
* "Large" section callbacks
*/
diff --git a/src/H5Oattr.c b/src/H5Oattr.c
index 5084c37..7a17c6a 100644
--- a/src/H5Oattr.c
+++ b/src/H5Oattr.c
@@ -36,7 +36,7 @@ static void *H5O__attr_copy_file(H5F_t *file_src, const H5O_msg_class_t *mesg_ty
void *native_src, H5F_t *file_dst, hbool_t *recompute_size,
H5O_copy_t *cpy_info, void *udata);
static herr_t H5O__attr_post_copy_file(const H5O_loc_t *src_oloc,
- const void *mesg_src, H5O_loc_t *dst_oloc, void *mesg_dst,
+ const void *mesg_src, H5O_loc_t *dst_oloc, void *mesg_dst,
H5O_copy_t *cpy_info);
static herr_t H5O_attr_get_crt_index(const void *_mesg, H5O_msg_crt_idx_t *crt_idx);
static herr_t H5O_attr_set_crt_index(void *_mesg, H5O_msg_crt_idx_t crt_idx);
diff --git a/src/H5Oattribute.c b/src/H5Oattribute.c
index 1888e45..da50860 100644
--- a/src/H5Oattribute.c
+++ b/src/H5Oattribute.c
@@ -842,7 +842,7 @@ H5O_attr_write_cb(H5O_t *oh, H5O_mesg_t *mesg/*in,out*/,
HGOTO_ERROR(H5E_ATTR, H5E_CANTPROTECT, H5_ITER_ERROR, "unable to load object header chunk")
 /* Because the attribute structure is shared now, the only situation that requires
- * copying the data is when the metadata cache evicts and reloads this attribute.
+ * copying the data is when the metadata cache evicts and reloads this attribute.
* The shared attribute structure will be different in that situation. SLU-2010/7/29 */
if(((H5A_t *)mesg->native)->shared != udata->attr->shared) {
/* Sanity check */
diff --git a/src/H5Ocache.c b/src/H5Ocache.c
index 2c3f4b4..3a8611a 100644
--- a/src/H5Ocache.c
+++ b/src/H5Ocache.c
@@ -65,17 +65,17 @@ static herr_t H5O__cache_get_final_load_size(const void *image_ptr, size_t image
void *udata, size_t *actual_len);
static htri_t H5O__cache_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5O__cache_deserialize(const void *image, size_t len,
- void *udata, hbool_t *dirty);
+ void *udata, hbool_t *dirty);
static herr_t H5O__cache_image_len(const void *thing, size_t *image_len);
static herr_t H5O__cache_serialize(const H5F_t *f, void *image, size_t len,
- void *thing);
+ void *thing);
static herr_t H5O__cache_notify(H5AC_notify_action_t action, void *_thing);
static herr_t H5O__cache_free_icr(void *thing);
static herr_t H5O__cache_chk_get_initial_load_size(void *udata, size_t *image_len);
static htri_t H5O__cache_chk_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5O__cache_chk_deserialize(const void *image, size_t len,
- void *udata, hbool_t *dirty);
+ void *udata, hbool_t *dirty);
static herr_t H5O__cache_chk_image_len(const void *thing, size_t *image_len);
static herr_t H5O__cache_chk_serialize(const H5F_t *f, void *image, size_t len,
void *thing);
@@ -160,7 +160,7 @@ H5FL_SEQ_DEFINE(H5O_cont_t);
/*-------------------------------------------------------------------------
* Function: H5O__cache_get_initial_load_size()
*
- * Purpose: Tell the metadata cache how much data to read from file in
+ * Purpose: Tell the metadata cache how much data to read from file in
* the first speculative read for the object header.
*
* Return: Success: SUCCEED
@@ -287,12 +287,12 @@ H5O__cache_verify_chksum(const void *_image, size_t len, void *_udata)
/*-------------------------------------------------------------------------
* Function: H5O__cache_deserialize
*
- * Purpose: Attempt to deserialize the object header contained in the
- * supplied buffer, load the data into an instance of H5O_t, and
+ * Purpose: Attempt to deserialize the object header contained in the
+ * supplied buffer, load the data into an instance of H5O_t, and
* return a pointer to the new instance.
*
- * Note that the object header is read with a speculative read.
- * If the initial read is too small, make note of this fact and return
+ * Note that the object header is read with a speculative read.
+ * If the initial read is too small, make note of this fact and return
 * without error. H5C_load_entry() will note the size discrepancy
* and retry the deserialize operation with the correct size read.
*
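
The speculative-read protocol mentioned above, in miniature. All names here are placeholders for illustration, not the real H5C client API: read a guessed prefix, let the client compute the true size from it, and re-read once if the guess was short.

#include <stddef.h>
#include <string.h>

typedef size_t (*final_size_fn)(const void *prefix);

/* The caller deserializes buf after this returns the final length */
static size_t
speculative_load(const unsigned char *file_image, size_t guess,
                 final_size_fn final_size, unsigned char *buf)
{
    size_t len = guess;

    memcpy(buf, file_image, len);          /* speculative first read    */
    if (final_size(buf) > len) {           /* prefix says we fell short */
        len = final_size(buf);
        memcpy(buf, file_image, len);      /* retry at the real size    */
    }
    return len;
}
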
@@ -316,7 +316,7 @@ H5O__cache_deserialize(const void *image, size_t len, void *_udata,
/* Check arguments */
HDassert(image);
- HDassert(len > 0);
+ HDassert(len > 0);
HDassert(udata);
HDassert(udata->common.f);
HDassert(udata->common.cont_msg_info);
@@ -440,15 +440,15 @@ H5O__cache_serialize(const H5F_t *f, void *image, size_t len, void *_thing)
H5O_assert(oh);
#endif /* H5O_DEBUG */
- /* Point to raw data 'image' for first chunk, which
- * has room for the prefix
+ /* Point to raw data 'image' for first chunk, which
+ * has room for the prefix
*/
chunk_image = oh->chunk[0].image;
 /* Later versions of the object header prefix have a different format and
* also require that chunk 0 always be updated, since the checksum
* on the entire block of memory needs to be updated if anything is
- * modified
+ * modified
*/
if(oh->version > H5O_VERSION_1) {
uint64_t chunk0_size; /* Size of chunk 0's data */
@@ -535,11 +535,11 @@ H5O__cache_serialize(const H5F_t *f, void *image, size_t len, void *_thing)
HDassert((size_t)(chunk_image - oh->chunk[0].image) == (size_t)(H5O_SIZEOF_HDR(oh) - H5O_SIZEOF_CHKSUM_OH(oh)));
/* Serialize messages for this chunk */
- if(H5O__chunk_serialize(f, oh, (unsigned)0) < 0)
+ if(H5O__chunk_serialize(f, oh, (unsigned)0) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTSERIALIZE, FAIL, "unable to serialize first object header chunk")
- /* copy the chunk into the image -- this is potentially expensive.
- * Can we rework things so that the object header and the cache
+ /* copy the chunk into the image -- this is potentially expensive.
+ * Can we rework things so that the object header and the cache
* share a buffer?
*/
H5MM_memcpy(image, oh->chunk[0].image, len);
@@ -650,7 +650,7 @@ done:
*/
static herr_t
H5O__cache_free_icr(void *_thing)
-{
+{
H5O_t *oh = (H5O_t *)_thing; /* Object header to destroy */
herr_t ret_value = SUCCEED; /* Return value */
@@ -673,8 +673,8 @@ done:
/*-------------------------------------------------------------------------
* Function: H5O__cache_chk_get_initial_load_size()
*
- * Purpose: Tell the metadata cache how large the on disk image of the
- * chunk proxy is, so it can load the image into a buffer for the
+ * Purpose: Tell the metadata cache how large the on disk image of the
+ * chunk proxy is, so it can load the image into a buffer for the
* deserialize call.
*
* Return: Success: SUCCEED
@@ -750,7 +750,7 @@ H5O__cache_chk_verify_chksum(const void *_image, size_t len, void *_udata)
* Function: H5O__cache_chk_deserialize
*
* Purpose: Attempt to deserialize the object header continuation chunk
- * contained in the supplied buffer, load the data into an instance
+ * contained in the supplied buffer, load the data into an instance
* of H5O_chunk_proxy_t, and return a pointer to the new instance.
*
* Return: Success: Pointer to in core representation
@@ -779,7 +779,7 @@ H5O__cache_chk_deserialize(const void *image, size_t len, void *_udata,
HDassert(dirty);
/* Allocate space for the object header data structure */
- if(NULL == (chk_proxy = H5FL_CALLOC(H5O_chunk_proxy_t)))
+ if(NULL == (chk_proxy = H5FL_CALLOC(H5O_chunk_proxy_t)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTALLOC, NULL, "memory allocation failed")
/* Check if we are still decoding the object header */
@@ -803,7 +803,7 @@ H5O__cache_chk_deserialize(const void *image, size_t len, void *_udata,
/* Set the chunk number for the chunk proxy */
chk_proxy->chunkno = udata->chunkno;
- /* Sanity check that the chunk representation we have in memory is
+ /* Sanity check that the chunk representation we have in memory is
* the same as the one being brought in from disk.
*/
HDassert(0 == HDmemcmp(image, udata->oh->chunk[chk_proxy->chunkno].image, udata->oh->chunk[chk_proxy->chunkno].size));
@@ -829,7 +829,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5O__cache_chk_image_len
*
- * Purpose: Return the on disk image size of an object header chunk to the
+ * Purpose: Return the on disk image size of an object header chunk to the
* metadata cache via the image_len.
*
* Return: Success: SUCCEED
@@ -863,9 +863,9 @@ H5O__cache_chk_image_len(const void *_thing, size_t *image_len)
/*-------------------------------------------------------------------------
* Function: H5O__cache_chk_serialize
*
- * Purpose: Given a pointer to an instance of an object header chunk and an
- * appropriately sized buffer, serialize the contents of the
- * instance for writing to disk, and copy the serialized data
+ * Purpose: Given a pointer to an instance of an object header chunk and an
+ * appropriately sized buffer, serialize the contents of the
+ * instance for writing to disk, and copy the serialized data
* into the buffer.
*
* Return: Success: SUCCEED
@@ -1262,19 +1262,19 @@ H5O__prefix_deserialize(const uint8_t *_image, H5O_cache_ud_t *udata)
/* Verify object header prefix length */
HDassert((size_t)(image - _image) == (size_t)(H5O_SIZEOF_HDR(oh) - H5O_SIZEOF_CHKSUM_OH(oh)));
- /* If udata->oh is to be freed (see H5O__cache_verify_chksum),
+ /* If udata->oh is to be freed (see H5O__cache_verify_chksum),
save the pointer to udata->oh and free it later after setting
udata->oh with the new object header */
if(udata->free_oh) {
H5O_t *saved_oh = udata->oh;
- HDassert(udata->oh);
+ HDassert(udata->oh);
/* Save the object header for later use in 'deserialize' callback */
udata->oh = oh;
if(H5O__free(saved_oh) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTRELEASE, FAIL, "can't destroy object header")
udata->free_oh = FALSE;
- } else
+ } else
/* Save the object header for later use in 'deserialize' callback */
udata->oh = oh;
@@ -1490,7 +1490,7 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image
mesg->type = H5O_msg_class_g[H5O_UNKNOWN_ID];
/* Check for "fail if unknown" message flags */
- if(((udata->file_intent & H5F_ACC_RDWR) &&
+ if(((udata->file_intent & H5F_ACC_RDWR) &&
(flags & H5O_MSG_FLAG_FAIL_IF_UNKNOWN_AND_OPEN_FOR_WRITE))
|| (flags & H5O_MSG_FLAG_FAIL_IF_UNKNOWN_ALWAYS))
HGOTO_ERROR(H5E_OHDR, H5E_BADMESG, FAIL, "unknown message with 'fail if unknown' flag found")
diff --git a/src/H5Ocache_image.c b/src/H5Ocache_image.c
index 591ac4a..d9b926f 100644
--- a/src/H5Ocache_image.c
+++ b/src/H5Ocache_image.c
@@ -39,10 +39,10 @@
/* Callbacks for message class */
static void *H5O__mdci_decode(H5F_t *f, H5O_t *open_oh, unsigned mesg_flags,
unsigned *ioflags, size_t p_size, const uint8_t *p);
-static herr_t H5O__mdci_encode(H5F_t *f, hbool_t disable_shared,
+static herr_t H5O__mdci_encode(H5F_t *f, hbool_t disable_shared,
uint8_t *p, const void *_mesg);
static void *H5O__mdci_copy(const void *_mesg, void *_dest);
-static size_t H5O__mdci_size(const H5F_t *f, hbool_t disable_shared,
+static size_t H5O__mdci_size(const H5F_t *f, hbool_t disable_shared,
const void *_mesg);
static herr_t H5O__mdci_free(void *mesg);
static herr_t H5O__mdci_delete(H5F_t *f, H5O_t *open_oh, void *_mesg);
@@ -142,7 +142,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5O__mdci_encode(H5F_t *f, hbool_t H5_ATTR_UNUSED disable_shared,
+H5O__mdci_encode(H5F_t *f, hbool_t H5_ATTR_UNUSED disable_shared,
uint8_t *p, const void *_mesg)
{
const H5O_mdci_t *mesg = (const H5O_mdci_t *)_mesg;
@@ -219,7 +219,7 @@ done:
*-------------------------------------------------------------------------
*/
static size_t
-H5O__mdci_size(const H5F_t *f, hbool_t H5_ATTR_UNUSED disable_shared,
+H5O__mdci_size(const H5F_t *f, hbool_t H5_ATTR_UNUSED disable_shared,
const void H5_ATTR_UNUSED *_mesg)
{
size_t ret_value = 0; /* Return value */
@@ -227,7 +227,7 @@ H5O__mdci_size(const H5F_t *f, hbool_t H5_ATTR_UNUSED disable_shared,
FUNC_ENTER_STATIC_NOERR
/* Set return value */
- ret_value = (size_t)( 1 + /* Version number */
+ ret_value = (size_t)( 1 + /* Version number */
H5F_SIZEOF_ADDR(f) + /* addr of metadata cache */
/* image block */
H5F_SIZEOF_SIZE(f) ); /* length of metadata cache */
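
As a worked instance of the computation above, assuming the common case where both H5F_SIZEOF_ADDR(f) and H5F_SIZEOF_SIZE(f) are 8, the message body is 1 + 8 + 8 = 17 bytes:

#include <stdio.h>

int main(void)
{
    unsigned sizeof_addr = 8;   /* assumed H5F_SIZEOF_ADDR(f) */
    unsigned sizeof_size = 8;   /* assumed H5F_SIZEOF_SIZE(f) */

    printf("mdci message size = %u bytes\n",
           1 + sizeof_addr + sizeof_size);   /* prints 17 */
    return 0;
}
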
diff --git a/src/H5Odtype.c b/src/H5Odtype.c
index a349cc5..61fa32a 100644
--- a/src/H5Odtype.c
+++ b/src/H5Odtype.c
@@ -43,7 +43,7 @@ static void *H5O__dtype_copy_file(H5F_t *file_src, const H5O_msg_class_t *mesg_t
void *native_src, H5F_t *file_dst, hbool_t *recompute_size,
H5O_copy_t *cpy_info, void *udata);
static herr_t H5O__dtype_shared_post_copy_upd(const H5O_loc_t *src_oloc,
- const void *mesg_src, H5O_loc_t *dst_oloc, void *mesg_dst,
+ const void *mesg_src, H5O_loc_t *dst_oloc, void *mesg_dst,
H5O_copy_t *cpy_info);
static herr_t H5O__dtype_debug(H5F_t *f, const void *_mesg, FILE * stream,
int indent, int fwidth);
diff --git a/src/H5Olayout.c b/src/H5Olayout.c
index 2866267..bba332a 100644
--- a/src/H5Olayout.c
+++ b/src/H5Olayout.c
@@ -373,7 +373,7 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh,
/* Check version */
if(mesg->version < H5O_LAYOUT_VERSION_4)
HGOTO_ERROR(H5E_OHDR, H5E_VERSION, NULL, "invalid layout version with virtual layout")
-
+
/* Heap information */
H5F_addr_decode(f, &p, &(mesg->storage.u.virt.serial_list_hobjid.addr));
UINT32DECODE(p, mesg->storage.u.virt.serial_list_hobjid.idx);
diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h
index 32df33f..6f54a31 100644
--- a/src/H5Oprivate.h
+++ b/src/H5Oprivate.h
@@ -970,7 +970,7 @@ H5_DLL herr_t H5O_refresh_metadata_reopen(hid_t oid, H5G_loc_t *obj_loc, hbool_t
H5_DLL herr_t H5O_copy_header_map(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out */,
H5O_copy_t *cpy_info, hbool_t inc_depth,
H5O_type_t *obj_type, void **udata);
-H5_DLL herr_t H5O_copy_expand_ref(H5F_t *file_src, void *_src_ref,
+H5_DLL herr_t H5O_copy_expand_ref(H5F_t *file_src, void *_src_ref,
H5F_t *file_dst, void *_dst_ref, size_t ref_count, H5R_type_t ref_type,
H5O_copy_t *cpy_info);
diff --git a/src/H5R.c b/src/H5R.c
index 95abc32..1c89b22 100644
--- a/src/H5R.c
+++ b/src/H5R.c
@@ -323,7 +323,7 @@ done:
M. Scot Breitenfeld
22 January 2014
Changed the behavior for the returned value of the function when name is NULL.
- If name is NULL then size is ignored and the function returns the size
+ If name is NULL then size is ignored and the function returns the size
 * of the name buffer (not including the NULL terminator); it still returns
negative on failure.
--------------------------------------------------------------------------*/
diff --git a/src/H5S.c b/src/H5S.c
index f36446c..ad15be3 100644
--- a/src/H5S.c
+++ b/src/H5S.c
@@ -240,7 +240,7 @@ H5S_get_validated_dataspace(hid_t space_id, const H5S_t **space)
if (space_id < 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid space_id (ID cannot be a negative number)")
-
+
if (H5S_ALL == space_id) {
/* No special dataspace struct for H5S_ALL */
*space = NULL;
@@ -1431,7 +1431,7 @@ H5S_set_extent_simple(H5S_t *space, unsigned rank, const hsize_t *dims,
} /* end for */
space->extent.nelem = nelem;
- /* Copy the maximum dimensions if specified. Otherwise, the maximum dimensions are the
+ /* Copy the maximum dimensions if specified. Otherwise, the maximum dimensions are the
 * same as the dimensions */
space->extent.max = (hsize_t *)H5FL_ARR_MALLOC(hsize_t, (size_t)rank);
if(max != NULL) {
@@ -1479,7 +1479,7 @@ done:
*
* Modification:
* Raymond Lu 03/30/2011
- * We allow 0-dimension for non-unlimited dimension starting
+ * We allow 0-dimension for non-unlimited dimension starting
 * from the 1.8.7 release.
*
*-------------------------------------------------------------------------
diff --git a/src/H5SM.c b/src/H5SM.c
index 885ddc1..e3acc2d 100644
--- a/src/H5SM.c
+++ b/src/H5SM.c
@@ -57,7 +57,7 @@ static herr_t H5SM__create_index(H5F_t *f, H5SM_index_header_t *header);
static herr_t H5SM__delete_index(H5F_t *f, H5SM_index_header_t *header,
hbool_t delete_heap);
static haddr_t H5SM__create_list(H5F_t *f, H5SM_index_header_t *header);
-static herr_t H5SM__find_in_list(const H5SM_list_t *list, const H5SM_mesg_key_t *key,
+static herr_t H5SM__find_in_list(const H5SM_list_t *list, const H5SM_mesg_key_t *key,
size_t *empty_pos, size_t *list_pos);
static herr_t H5SM__convert_list_to_btree(H5F_t * f, H5SM_index_header_t * header,
H5SM_list_t **_list, H5HF_t *fheap, H5O_t *open_oh);
diff --git a/src/H5SMcache.c b/src/H5SMcache.c
index ee28fa4..1168414 100644
--- a/src/H5SMcache.c
+++ b/src/H5SMcache.c
@@ -59,16 +59,16 @@
static herr_t H5SM__cache_table_get_initial_load_size(void *udata, size_t *image_len);
static htri_t H5SM__cache_table_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5SM__cache_table_deserialize(const void *image, size_t len,
- void *udata, hbool_t *dirty);
+ void *udata, hbool_t *dirty);
static herr_t H5SM__cache_table_image_len(const void *thing, size_t *image_len);
static herr_t H5SM__cache_table_serialize(const H5F_t *f, void *image,
- size_t len, void *thing);
+ size_t len, void *thing);
static herr_t H5SM__cache_table_free_icr(void *thing);
static herr_t H5SM__cache_list_get_initial_load_size(void *udata, size_t *image_len);
static htri_t H5SM__cache_list_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5SM__cache_list_deserialize(const void *image, size_t len,
- void *udata, hbool_t *dirty);
+ void *udata, hbool_t *dirty);
static herr_t H5SM__cache_list_image_len(const void *thing, size_t *image_len);
static herr_t H5SM__cache_list_serialize(const H5F_t *f, void *image,
size_t len, void *thing);
@@ -129,7 +129,7 @@ const H5AC_class_t H5AC_SOHM_LIST[1] = {{
/*-------------------------------------------------------------------------
* Function: H5SM__cache_table_get_initial_load_size()
*
- * Purpose: Return the size of the master table of Shared Object Header
+ * Purpose: Return the size of the master table of Shared Object Header
* Message indexes on disk.
*
* Return: Success: SUCCEED
@@ -198,9 +198,9 @@ H5SM__cache_table_verify_chksum(const void *_image, size_t len, void H5_ATTR_UNU
/*-------------------------------------------------------------------------
* Function: H5SM__cache_table_deserialize
*
- * Purpose: Given a buffer containing the on disk representation of the
+ * Purpose: Given a buffer containing the on disk representation of the
* master table of Shared Object Header Message indexes, deserialize
- * the table, copy the contents into a newly allocated instance of
+ * the table, copy the contents into a newly allocated instance of
* H5SM_master_table_t, and return a pointer to the new instance.
*
* Return: Success: Pointer to in core representation
@@ -264,7 +264,7 @@ H5SM__cache_table_deserialize(const void *_image, size_t len, void *_udata,
/* Read in the index headers */
for(u = 0; u < table->num_indexes; ++u) {
/* Verify correct version of index list */
- if(H5SM_LIST_VERSION != *image++)
+ if(H5SM_LIST_VERSION != *image++)
HGOTO_ERROR(H5E_SOHM, H5E_VERSION, NULL, "bad shared message list version number")
/* Type of the index (list or B-tree) */
@@ -555,8 +555,8 @@ H5SM__cache_list_verify_chksum(const void *_image, size_t H5_ATTR_UNUSED len, vo
/*-------------------------------------------------------------------------
* Function: H5SM__cache_list_deserialize
*
- * Purpose: Given a buffer containing the on disk image of a list of
- * SOHM messages, deserialize the list, load it into a newly allocated
+ * Purpose: Given a buffer containing the on disk image of a list of
+ * SOHM messages, deserialize the list, load it into a newly allocated
* instance of H5SM_list_t, and return a pointer to same.
*
* Return: Success: Pointer to in core representation
diff --git a/src/H5Shyper.c b/src/H5Shyper.c
index 72fc503..c462d1a 100644
--- a/src/H5Shyper.c
+++ b/src/H5Shyper.c
@@ -1962,7 +1962,7 @@ done:
const H5S_t *space: IN: The dataspace
hsize_t block_count: IN: The number of blocks in the selection
uint32_t *version: OUT: The version to use for encoding
- uint8_t *enc_size: OUT: The encoded size to use
+ uint8_t *enc_size: OUT: The encoded size to use
RETURNS
The version and the size to encode hyperslab selection info
@@ -2020,12 +2020,12 @@ H5S_hyper_get_version_enc_size(const H5S_t *space, hsize_t block_count, uint32_t
 /* If the block count or bounds exceed (2^32 - 1) */
if(count_up_version || bound_up_version)
tmp_version = H5S_HYPER_VERSION_2;
- else
+ else
/* block_count < 4: version 1 */
/* block_count >= 4: determined by low bound */
tmp_version = (block_count < 4) ? H5S_HYPER_VERSION_1 : H5O_sds_hyper_ver_bounds[low_bound];
- } else {
+ } else {
/* Fail for irregular hyperslab if exceeds 32 bits */
if(count_up_version)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADVALUE, FAIL, "The number of blocks in hyperslab selection exceeds 2^32")
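
The regular-hyperslab branch above boils down to a pure function. A sketch, where version_from_low_bound stands in for the H5O_sds_hyper_ver_bounds lookup (the names and signature are illustrative, not the H5S API):

#include <stdbool.h>
#include <stdint.h>

static uint32_t
regular_hyper_version(bool exceeds_32bits, uint64_t block_count,
                      uint32_t version_from_low_bound)
{
    if (exceeds_32bits)               /* counts or bounds exceed 2^32 - 1 */
        return 2;                     /* H5S_HYPER_VERSION_2 */
    return (block_count < 4) ? 1      /* H5S_HYPER_VERSION_1 */
                             : version_from_low_bound;
}
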
@@ -2037,7 +2037,7 @@ H5S_hyper_get_version_enc_size(const H5S_t *space, hsize_t block_count, uint32_t
/* Version bounds check */
if(tmp_version > H5O_sds_hyper_ver_bounds[high_bound])
HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "Dataspace hyperslab selection version out of bounds")
-
+
*version = tmp_version;
/* Determine the encoded size based on version */
@@ -4699,22 +4699,22 @@ H5S__hyper_project_simple(const H5S_t *base_space, H5S_t *new_space, hsize_t *of
/* Copy the diminfo */
while(base_space_dim < base_space->extent.rank) {
- new_space->select.sel_info.hslab->app_diminfo[new_space_dim].start =
+ new_space->select.sel_info.hslab->app_diminfo[new_space_dim].start =
base_space->select.sel_info.hslab->app_diminfo[base_space_dim].start;
- new_space->select.sel_info.hslab->app_diminfo[new_space_dim].stride =
+ new_space->select.sel_info.hslab->app_diminfo[new_space_dim].stride =
base_space->select.sel_info.hslab->app_diminfo[base_space_dim].stride;
- new_space->select.sel_info.hslab->app_diminfo[new_space_dim].count =
+ new_space->select.sel_info.hslab->app_diminfo[new_space_dim].count =
base_space->select.sel_info.hslab->app_diminfo[base_space_dim].count;
- new_space->select.sel_info.hslab->app_diminfo[new_space_dim].block =
+ new_space->select.sel_info.hslab->app_diminfo[new_space_dim].block =
base_space->select.sel_info.hslab->app_diminfo[base_space_dim].block;
- new_space->select.sel_info.hslab->opt_diminfo[new_space_dim].start =
+ new_space->select.sel_info.hslab->opt_diminfo[new_space_dim].start =
base_space->select.sel_info.hslab->opt_diminfo[base_space_dim].start;
new_space->select.sel_info.hslab->opt_diminfo[new_space_dim].stride =
base_space->select.sel_info.hslab->opt_diminfo[base_space_dim].stride;
- new_space->select.sel_info.hslab->opt_diminfo[new_space_dim].count =
+ new_space->select.sel_info.hslab->opt_diminfo[new_space_dim].count =
base_space->select.sel_info.hslab->opt_diminfo[base_space_dim].count;
- new_space->select.sel_info.hslab->opt_diminfo[new_space_dim].block =
+ new_space->select.sel_info.hslab->opt_diminfo[new_space_dim].block =
base_space->select.sel_info.hslab->opt_diminfo[base_space_dim].block;
/* Advance to next dimensions */
@@ -7401,7 +7401,7 @@ H5S_select_hyperslab (H5S_t *space, H5S_seloper_t op,
/* Check for unlimited dimension */
for(u = 0; u<space->extent.rank; u++)
if((count[u] == H5S_UNLIMITED) || (block[u] == H5S_UNLIMITED)) {
- if(unlim_dim >= 0)
+ if(unlim_dim >= 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_UNSUPPORTED, FAIL, "cannot have more than one unlimited dimension in selection")
else {
if(count[u] == block[u] /* == H5S_UNLIMITED */)
@@ -9491,7 +9491,7 @@ H5S__hyper_project_intersection(const H5S_t *src_space, const H5S_t *dst_space,
HDassert(dst_space);
HDassert(src_intersect_space);
HDassert(proj_space);
-
+
/* Assert that src_space and src_intersect_space have same extent and there
* are no point selections */
HDassert(H5S_GET_EXTENT_NDIMS(src_space)
@@ -9660,7 +9660,7 @@ H5S__hyper_project_intersection(const H5S_t *src_space, const H5S_t *dst_space,
* selection and advance any sequences we complete */
if(ss_off[ss_i] >= sis_off[sis_i])
int_sel_off = ss_sel_off;
- else
+ else
int_sel_off = sis_off[sis_i] - ss_off[ss_i] + ss_sel_off;
if((ss_off[ss_i] + (hsize_t)ss_len[ss_i]) <= (sis_off[sis_i]
+ (hsize_t)sis_len[sis_i])) {
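The else branch above computes where, inside the source selection, the current intersection begins. A standalone sketch of just that arithmetic (hsize_t approximated by uint64_t; names follow the hunk):

#include <stdint.h>

/* Sketch: offset of the current intersection, in elements from the start
 * of the source selection.  ss_off is the source sequence's file offset,
 * ss_sel_off its offset within the selection, sis_off the intersect
 * sequence's file offset. */
static uint64_t
intersect_sel_off(uint64_t ss_off, uint64_t ss_sel_off, uint64_t sis_off)
{
    if (ss_off >= sis_off)
        return ss_sel_off;                /* source starts inside intersect */
    return sis_off - ss_off + ss_sel_off; /* skip the non-overlapping head */
}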
diff --git a/src/H5Smpio.c b/src/H5Smpio.c
index 935d279..60e2dc2 100644
--- a/src/H5Smpio.c
+++ b/src/H5Smpio.c
@@ -41,12 +41,12 @@ static herr_t H5S_mpio_all_type(const H5S_t *space, size_t elmt_size,
MPI_Datatype *new_type, int *count, hbool_t *is_derived_type);
static herr_t H5S_mpio_none_type(MPI_Datatype *new_type, int *count,
hbool_t *is_derived_type);
-static herr_t H5S_mpio_create_point_datatype(size_t elmt_size, hsize_t num_points,
+static herr_t H5S_mpio_create_point_datatype(size_t elmt_size, hsize_t num_points,
MPI_Aint *disp, MPI_Datatype *new_type);
static herr_t H5S_mpio_point_type(const H5S_t *space, size_t elmt_size,
MPI_Datatype *new_type, int *count, hbool_t *is_derived_type,
hbool_t do_permute, hsize_t **permute_map, hbool_t *is_permuted);
-static herr_t H5S_mpio_permute_type(const H5S_t *space, size_t elmt_size,
+static herr_t H5S_mpio_permute_type(const H5S_t *space, size_t elmt_size,
hsize_t **permute_map, MPI_Datatype *new_type, int *count,
hbool_t *is_derived_type);
static herr_t H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size,
@@ -198,9 +198,9 @@ H5S_mpio_none_type(MPI_Datatype *new_type, int *count, hbool_t *is_derived_type)
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5S_mpio_create_point_datatype (size_t elmt_size, hsize_t num_points,
- MPI_Aint *disp, MPI_Datatype *new_type)
+ MPI_Aint *disp, MPI_Datatype *new_type)
{
MPI_Datatype elmt_type; /* MPI datatype for individual element */
hbool_t elmt_type_created = FALSE; /* Whether the element MPI datatype was created */
@@ -239,7 +239,7 @@ H5S_mpio_create_point_datatype (size_t elmt_size, hsize_t num_points,
if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
}
- else {
+ else {
/* use LARGE_DATATYPE::
* We'll create an hindexed_block type for every 2G point count and then combine
* those and any remaining points into a single large datatype.
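The "2G point count" batching mentioned in this comment is plain division: split the point total into counts that each fit a 32-bit int argument. A sketch of the batch arithmetic alone (the MPI type construction itself is omitted; INT32_MAX stands in for the library's big-I/O threshold):

#include <stdint.h>

/* Sketch: number of full 32-bit-sized batches plus the remainder, as a
 * large point datatype would be assembled batch by batch. */
static void
point_batches(uint64_t num_points, uint64_t *full_batches, uint64_t *remainder)
{
    const uint64_t batch = INT32_MAX;   /* largest count an int can carry */
    *full_batches = num_points / batch;
    *remainder    = num_points % batch;
}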
@@ -373,7 +373,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5S_mpio_point_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type,
+H5S_mpio_point_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type,
int *count, hbool_t *is_derived_type, hbool_t do_permute, hsize_t **permute,
hbool_t *is_permuted)
{
@@ -410,19 +410,19 @@ H5S_mpio_point_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type
disp[u] = H5VM_array_offset(space->extent.rank, space->extent.size, curr->pnt);
disp[u] *= elmt_size;
- /* This is a File Space used to set the file view, so adjust the displacements
+ /* This is a File Space used to set the file view, so adjust the displacements
* to have them monotonically non-decreasing.
- * Generate the permutation array by indicating at each point being selected,
- * the position it will be shifted to in the new displacement. Example:
- * Suppose 4 points with corresponding displacements are selected
- * Pt 1: disp=6 ; Pt 2: disp=3 ; Pt 3: disp=0 ; Pt 4: disp=4
+ * Generate the permutation array by indicating at each point being selected,
+ * the position it will be shifted to in the new displacement. Example:
+ * Suppose 4 points with corresponding displacements are selected
+ * Pt 1: disp=6 ; Pt 2: disp=3 ; Pt 3: disp=0 ; Pt 4: disp=4
* The permute map to sort the displacements in order will be:
* point 1: map[0] = num_points, indicating that this point is not moved (1st point selected)
- * point 2: map[1] = 0, indicating that this point is moved to the first position,
+ * point 2: map[1] = 0, indicating that this point is moved to the first position,
* since disp_pt1(6) > disp_pt2(3)
- * point 3: map[2] = 0, move to position 0, because it has the lowest disp among
+ * point 3: map[2] = 0, move to position 0, because it has the lowest disp among
* the points selected so far.
- * point 4: map[3] = 2, move to the 2nd position since point 1 has a higher disp,
+ * point 4: map[3] = 2, move to the 2nd position since point 1 has a higher disp,
* but points 2 and 3 have lower displacements.
*/
if(do_permute) {
@@ -447,7 +447,7 @@ H5S_mpio_point_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type
HDmemmove(disp + m + 1, disp + m, (u - m) * sizeof(MPI_Aint));
disp[m] = temp;
} /* end if */
- (*permute)[u] = m;
+ (*permute)[u] = m;
} /* end if */
else
(*permute)[u] = num_points;
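The surrounding loop is an insertion sort over the displacements that records, per point, the position it was inserted at. A minimal self-contained sketch of that permute-map construction (types simplified; n_total plays the role of num_points as the "not moved" sentinel):

#include <stdint.h>
#include <string.h>

static void
build_permute_map(int64_t *disp, uint64_t *map, uint64_t n_total)
{
    for (uint64_t u = 0; u < n_total; u++) {
        uint64_t m = u;
        while (m > 0 && disp[m - 1] > disp[u])  /* find insertion point */
            m--;
        if (m != u) {     /* point moves: shift the tail up, insert, record */
            int64_t temp = disp[u];
            memmove(disp + m + 1, disp + m, (u - m) * sizeof(int64_t));
            disp[m] = temp;
            map[u] = m;
        }
        else
            map[u] = n_total;   /* sentinel: point keeps its position */
    }
}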
@@ -508,7 +508,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5S_mpio_permute_type(const H5S_t *space, size_t elmt_size, hsize_t **permute,
+H5S_mpio_permute_type(const H5S_t *space, size_t elmt_size, hsize_t **permute,
MPI_Datatype *new_type, int *count, hbool_t *is_derived_type)
{
MPI_Aint *disp = NULL; /* Datatype displacement for each point*/
@@ -571,12 +571,12 @@ H5S_mpio_permute_type(const H5S_t *space, size_t elmt_size, hsize_t **permute,
/* Set the displacement of the current point */
disp[u] = curr_off;
- /* This is a memory displacement, so for each point selected,
+ /* This is a memory displacement, so for each point selected,
* apply the map that was generated by the file selection */
if((*permute)[u] != num_points) {
MPI_Aint temp = disp[u];
- HDmemmove(disp + (*permute)[u] + 1, disp + (*permute)[u],
+ HDmemmove(disp + (*permute)[u] + 1, disp + (*permute)[u],
(u - (*permute)[u]) * sizeof(MPI_Aint));
disp[(*permute)[u]] = temp;
} /* end if */
@@ -795,7 +795,7 @@ H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size,
#endif
/* LARGE_DATATYPE::
- * Check if the number of elements to form the inner type fits into a 32 bit integer.
+ * Check if the number of elements to form the inner type fits into a 32 bit integer.
* If yes then just create the innertype with MPI_Type_contiguous.
* Otherwise create a compound datatype by iterating as many times as needed
* for the innertype to be created.
@@ -848,8 +848,8 @@ H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size,
HMPI_GOTO_ERROR(FAIL, "couldn't create MPI vector type", mpi_code)
}
else {
- /* Things get a bit more complicated and require LARGE_DATATYPE processing
- * There are two MPI datatypes that need to be created:
+ /* Things get a bit more complicated and require LARGE_DATATYPE processing
+ * There are two MPI datatypes that need to be created:
* 1) an internal contiguous block; and
* 2) a collection of elements where an element is a contiguous block(1).
* Remember that the input arguments to the MPI-IO functions use integer
@@ -863,18 +863,18 @@ H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size,
MPI_Datatype block_type;
/* create a contiguous datatype inner_type x number of BLOCKS.
- * Again we need to check that the number of BLOCKS can fit into
+ * Again we need to check that the number of BLOCKS can fit into
* a 32 bit integer */
if (bigio_count < d[i].block) {
- if (H5S_mpio_create_large_type(d[i].block, 0, inner_type,
+ if (H5S_mpio_create_large_type(d[i].block, 0, inner_type,
&block_type) < 0) {
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,
"couldn't ccreate a large block datatype in hyper selection")
}
}
else {
- if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)d[i].block,
- inner_type,
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)d[i].block,
+ inner_type,
&block_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
}
@@ -997,7 +997,7 @@ done:
*
* Modifications:
* Mohamad Chaarawi
- * Adding support for large datatypes (beyond the limit of a
+ * Adding support for large datatypes (beyond the limit of a
* 32 bit integer).
*-------------------------------------------------------------------------
*/
@@ -1156,8 +1156,8 @@ H5S_obtain_datatype(const hsize_t *down, H5S_hyper_span_t *span,
}
}
else {
- if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)blocklen[i],
- *elmt_type,
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)blocklen[i],
+ *elmt_type,
&temp_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
}
@@ -1181,11 +1181,11 @@ H5S_obtain_datatype(const hsize_t *down, H5S_hyper_span_t *span,
*span_type = outer_type;
}
- if (outer_type != MPI_DATATYPE_NULL)
+ if (outer_type != MPI_DATATYPE_NULL)
MPI_Type_free(&outer_type);
/* temp_type shouldn't be freed here...
* Note that we have simply copied it above (not MPI_Type_dup)
- * into the 'span_type' argument of the caller.
+ * into the 'span_type' argument of the caller.
* The caller needs to deal with it there!
*/
}
@@ -1312,7 +1312,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5S_mpio_space_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type,
+H5S_mpio_space_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type,
int *count, hbool_t *is_derived_type, hbool_t do_permute, hsize_t **permute_map,
hbool_t *is_permuted)
{
@@ -1333,7 +1333,7 @@ H5S_mpio_space_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type
* out-of-order point selection, then permute this selection which
* should be a memory selection to match the file space permutation.
*/
- if(TRUE == *is_permuted) {
+ if(TRUE == *is_permuted) {
switch(H5S_GET_SELECT_TYPE(space)) {
case H5S_SEL_NONE:
if(H5S_mpio_none_type(new_type, count, is_derived_type) < 0)
@@ -1409,7 +1409,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5S_mpio_create_large_type
*
- * Purpose: Create a large datatype of size larger than what a 32 bit integer
+ * Purpose: Create a large datatype of size larger than what a 32 bit integer
* can hold.
*
* Return: non-negative on success, negative on failure.
diff --git a/src/H5Snone.c b/src/H5Snone.c
index 0a9814c..caa672c 100644
--- a/src/H5Snone.c
+++ b/src/H5Snone.c
@@ -556,7 +556,7 @@ H5S_none_deserialize(H5S_t **space, const uint8_t **p)
UINT32DECODE(*p, version);
if(version < H5S_NONE_VERSION_1 || version > H5S_NONE_VERSION_LATEST)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADVALUE, FAIL, "bad version number for none selection")
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADVALUE, FAIL, "bad version number for none selection")
/* Skip over the remainder of the header */
*p += 8;
diff --git a/src/H5Spoint.c b/src/H5Spoint.c
index f1de0d4..d4dd144 100644
--- a/src/H5Spoint.c
+++ b/src/H5Spoint.c
@@ -462,7 +462,7 @@ done:
/* Release possible linked list of nodes */
while(top) {
- curr = top->next;
+ curr = top->next;
H5MM_xfree(top->pnt);
top = H5FL_FREE(H5S_pnt_node_t, top);
top = curr;
@@ -1559,7 +1559,7 @@ H5S_point_project_scalar(const H5S_t *space, hsize_t *offset)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "point selection of one element has more than one node!")
/* Calculate offset of selection in projected buffer */
- *offset = H5VM_array_offset(space->extent.rank, space->extent.size, node->pnt);
+ *offset = H5VM_array_offset(space->extent.rank, space->extent.size, node->pnt);
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -1613,7 +1613,7 @@ H5S_point_project_simple(const H5S_t *base_space, H5S_t *new_space, hsize_t *off
/* Calculate offset of selection in projected buffer */
HDmemset(block, 0, sizeof(block));
H5MM_memcpy(block, base_space->select.sel_info.pnt_lst->head->pnt, sizeof(hsize_t) * rank_diff);
- *offset = H5VM_array_offset(base_space->extent.rank, base_space->extent.size, block);
+ *offset = H5VM_array_offset(base_space->extent.rank, base_space->extent.size, block);
/* Iterate through base space's point nodes, copying the point information */
base_node = base_space->select.sel_info.pnt_lst->head;
diff --git a/src/H5Sprivate.h b/src/H5Sprivate.h
index 631a8f0..9ed52a9 100644
--- a/src/H5Sprivate.h
+++ b/src/H5Sprivate.h
@@ -315,7 +315,7 @@ H5_DLL herr_t H5S_mpio_space_type(const H5S_t *space, size_t elmt_size,
/* out: */ MPI_Datatype *new_type,
int *count,
hbool_t *is_derived_type,
- hbool_t do_permute,
+ hbool_t do_permute,
hsize_t **permute_map,
hbool_t * is_permuted);
#endif /* H5_HAVE_PARALLEL */
diff --git a/src/H5Sselect.c b/src/H5Sselect.c
index 40f2258..1e692db 100644
--- a/src/H5Sselect.c
+++ b/src/H5Sselect.c
@@ -1542,7 +1542,7 @@ H5S_get_select_type(const H5S_t *space)
Assumes that there is only a single "block" for hyperslab selections.
EXAMPLES
REVISION LOG
- Modified function to view identical shapes with different dimensions
+ Modified function to view identical shapes with different dimensions
as being the same under some circumstances.
--------------------------------------------------------------------------*/
htri_t
@@ -1575,13 +1575,13 @@ H5S_select_shape_same(const H5S_t *space1, const H5S_t *space2)
/* need to be able to handle spaces of different rank:
*
* To simplify logic, let space_a point to the element of the set
- * {space1, space2} with the largest rank or space1 if the ranks
+ * {space1, space2} with the largest rank or space1 if the ranks
* are identical.
*
* Similarly, let space_b point to the element of {space1, space2}
* with the smallest rank, or space2 if they are identical.
*
- * Let: space_a_rank be the rank of space_a,
+ * Let: space_a_rank be the rank of space_a,
* space_b_rank be the rank of space_b,
* delta_rank = space_a_rank - space_b_rank.
*
@@ -1623,7 +1623,7 @@ H5S_select_shape_same(const H5S_t *space1, const H5S_t *space2)
space_a_dim = (int)space_a_rank - 1;
space_b_dim = (int)space_b_rank - 1;
- /* recall that space_a_rank >= space_b_rank.
+ /* recall that space_a_rank >= space_b_rank.
*
* In the following while loop, we test to see if space_a and space_b
* have identical size in all dimensions they have in common.
@@ -1636,7 +1636,7 @@ H5S_select_shape_same(const H5S_t *space1, const H5S_t *space2)
space_b_dim--;
} /* end while */
- /* Since we are selecting the entire space, we must also verify that space_a
+ /* Since we are selecting the entire space, we must also verify that space_a
* has size 1 in all dimensions that it does not share with space_b.
*/
while(space_a_dim >= 0) {
@@ -1662,7 +1662,7 @@ H5S_select_shape_same(const H5S_t *space1, const H5S_t *space2)
* block == 1 in all dimensions that appear only in space_a.
*/
while(space_b_dim >= 0) {
- if(space_a->select.sel_info.hslab->opt_diminfo[space_a_dim].stride !=
+ if(space_a->select.sel_info.hslab->opt_diminfo[space_a_dim].stride !=
space_b->select.sel_info.hslab->opt_diminfo[space_b_dim].stride)
HGOTO_DONE(FALSE)
@@ -1670,7 +1670,7 @@ H5S_select_shape_same(const H5S_t *space1, const H5S_t *space2)
space_b->select.sel_info.hslab->opt_diminfo[space_b_dim].count)
HGOTO_DONE(FALSE)
- if(space_a->select.sel_info.hslab->opt_diminfo[space_a_dim].block !=
+ if(space_a->select.sel_info.hslab->opt_diminfo[space_a_dim].block !=
space_b->select.sel_info.hslab->opt_diminfo[space_b_dim].block)
HGOTO_DONE(FALSE)
@@ -1728,11 +1728,11 @@ H5S_select_shape_same(const H5S_t *space1, const H5S_t *space2)
space_a_dim = (int)space_a_rank - 1;
space_b_dim = (int)space_b_rank - 1;
- /* The first block only compares the sizes and sets the
- * relative offsets for later blocks
+ /* The first block only compares the sizes and sets the
+ * relative offsets for later blocks
*/
if(first_block) {
- /* If the block sizes in the common dimensions from
+ /* If the block sizes in the common dimensions from
* each selection don't match, get out
*/
while(space_b_dim >= 0) {
@@ -1836,24 +1836,24 @@ done:
PURPOSE
Given a dataspace a of rank n with some selection, construct a new
- dataspace b of rank m (m != n), with the selection in a being
- topologically identical to that in b (as verified by
+ dataspace b of rank m (m != n), with the selection in a being
+ topologically identical to that in b (as verified by
H5S_select_shape_same()).
- This function exists, as some I/O code chokes on topologically
- identical selections with different ranks. At least to begin
+ This function exists, as some I/O code chokes on topologically
+ identical selections with different ranks. At least to begin
with, we will deal with the issue by constructing projections
- of the memory dataspace with ranks equaling those of the file
+ of the memory dataspace with ranks equaling those of the file
dataspace.
- Note that if m > n, it is possible that the starting point in the
- buffer associated with the memory dataspace will have to be
+ Note that if m > n, it is possible that the starting point in the
+ buffer associated with the memory dataspace will have to be
adjusted to match the projected dataspace. If the buf parameter
is not NULL, the function must return an adjusted buffer base
address in *adj_buf_ptr.
USAGE
- htri_t H5S_select_construct_projection(base_space,
+ htri_t H5S_select_construct_projection(base_space,
new_space_ptr,
new_space_rank,
buf,
@@ -1862,30 +1862,30 @@ done:
H5S_t ** new_space_ptr; OUT: Ptr to location in which to return
the address of the projected space
int new_space_rank; IN: Rank of the projected space.
- const void * buf; IN: Base address of the buffer
+ const void * buf; IN: Base address of the buffer
associated with the base space.
May be NULL.
void ** adj_buf_ptr; OUT: If buf != NULL, store the base
- address of the section of buf
+ address of the section of buf
that is described by *new_space_ptr
in *adj_buf_ptr.
-
+
RETURNS
Non-negative on success/Negative on failure.
DESCRIPTION
- Construct a new dataspace and associated selection which is a
- projection of the supplied dataspace and associated selection into
+ Construct a new dataspace and associated selection which is a
+ projection of the supplied dataspace and associated selection into
the specified rank. Return it in *new_space_ptr.
- If buf is supplied, computes the base address of the projected
+ If buf is supplied, computes the base address of the projected
selection in buf, and stores the base address in *adj_buf_ptr.
-
+
GLOBAL VARIABLES
COMMENTS, BUGS, ASSUMPTIONS
- The selection in the supplied base_space has thickness 1 in all
+ The selection in the supplied base_space has thickness 1 in all
dimensions greater than new_space_rank. Note that here we count
- dimensions from the fastest changing coordinate to the slowest
+ dimensions from the fastest changing coordinate to the slowest
changing coordinate.
EXAMPLES
REVISION LOG
@@ -1931,28 +1931,28 @@ H5S_select_construct_projection(const H5S_t *base_space, H5S_t **new_space_ptr,
/* Create new scalar dataspace */
if(NULL == (new_space = H5S_create(H5S_SCALAR)))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "unable to create scalar dataspace")
-
+
/* No need to register the dataspace (i.e. get an ID) as
* we will just be discarding it shortly.
*/
- /* Selection for the new space will be either all or
+ /* Selection for the new space will be either all or
* none, depending on whether the base space has 0 or
* 1 elements selected.
*
- * Observe that the base space can't have more than
+ * Observe that the base space can't have more than
* one selected element, since its selection has the
- * same shape as the file dataspace, and that data
+ * same shape as the file dataspace, and that data
* space is scalar.
*/
if(1 == npoints) {
/* Assuming that the selection in the base dataspace is not
- * empty, we must compute the offset of the selected item in
+ * empty, we must compute the offset of the selected item in
* the buffer associated with the base dataspace.
*
- * Since the new space rank is zero, we know that
- * the base space must have rank at least 1 -- and
- * hence it is a simple dataspace. However, the
+ * Since the new space rank is zero, we know that
+ * the base space must have rank at least 1 -- and
+ * hence it is a simple dataspace. However, the
* selection may be either point, hyperslab, or all.
*
*/
@@ -1970,25 +1970,25 @@ H5S_select_construct_projection(const H5S_t *base_space, H5S_t **new_space_ptr,
hsize_t new_space_dims[H5S_MAX_RANK]; /* Current dimensions for new dataspace */
hsize_t new_space_maxdims[H5S_MAX_RANK];/* Maximum dimensions for new dataspace */
unsigned rank_diff; /* Difference in ranks */
-
+
/* Set up the dimensions of the new, projected dataspace.
*
- * How we do this depends on whether we are projecting up into
- * increased dimensions, or down into a reduced number of
+ * How we do this depends on whether we are projecting up into
+ * increased dimensions, or down into a reduced number of
* dimensions.
*
- * If we are projecting up (the first half of the following
- * if statement), we copy the dimensions of the base data
- * space into the fastest changing dimensions of the new
+ * If we are projecting up (the first half of the following
+ * if statement), we copy the dimensions of the base data
+ * space into the fastest changing dimensions of the new
* projected dataspace, and set the remaining dimensions to
* one.
*
* If we are projecting down (the second half of the following
- * if statement), we just copy the dimensions with the most
+ * if statement), we just copy the dimensions with the most
* quickly changing dimensions into the dims for the projected
* data set.
*
- * This works, because H5S_select_shape_same() will return
+ * This works, because H5S_select_shape_same() will return
* true on selections of different rank iff:
*
* 1) the selection in the lower rank dataspace matches that
@@ -1996,13 +1996,13 @@ H5S_select_construct_projection(const H5S_t *base_space, H5S_t **new_space_ptr,
* the larger rank dataspace, and
*
* 2) the selection has thickness 1 in all ranks that appear
- * only in the higher rank dataspace (i.e. those with
+ * only in the higher rank dataspace (i.e. those with
* more slowly changing indices).
- */
+ */
if(new_space_rank > base_space_rank) {
hsize_t tmp_dim_size = 1; /* Temporary dimension value, for filling arrays */
- /* we must copy the dimensions of the base space into
+ /* we must copy the dimensions of the base space into
* the fastest changing dimensions of the new space,
* and set the remaining dimensions to 1
*/
@@ -2013,7 +2013,7 @@ H5S_select_construct_projection(const H5S_t *base_space, H5S_t **new_space_ptr,
H5MM_memcpy(&new_space_maxdims[rank_diff], base_space_maxdims, sizeof(new_space_maxdims[0]) * base_space_rank);
} /* end if */
else { /* new_space_rank < base_space_rank */
- /* we must copy the fastest changing dimension of the
+ /* we must copy the fastest changing dimension of the
* base space into the dimensions of the new space.
*/
rank_diff = base_space_rank - new_space_rank;
@@ -2021,12 +2021,12 @@ H5S_select_construct_projection(const H5S_t *base_space, H5S_t **new_space_ptr,
H5MM_memcpy(new_space_maxdims, &base_space_maxdims[rank_diff], sizeof(new_space_maxdims[0]) * new_space_rank);
} /* end else */
- /* now have the new space rank and dimensions set up --
+ /* now have the new space rank and dimensions set up --
* so we can create the new simple dataspace.
*/
if(NULL == (new_space = H5S_create_simple(new_space_rank, new_space_dims, new_space_maxdims)))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "can't create simple dataspace")
-
+
/* No need to register the dataspace (i.e. get an ID) as
* we will just be discarding it shortly.
*/
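Condensed, the dimension setup described in this hunk is two memcpy cases: pad new slow dimensions with 1 when projecting up, drop slow dimensions when projecting down. A sketch under the stated assumption that dropped dimensions have size 1:

#include <stdint.h>
#include <string.h>

static void
project_dims(const uint64_t *base_dims, unsigned base_rank,
             uint64_t *new_dims, unsigned new_rank)
{
    if (new_rank > base_rank) {              /* projecting up */
        unsigned rank_diff = new_rank - base_rank;
        for (unsigned u = 0; u < rank_diff; u++)
            new_dims[u] = 1;                 /* new slow dimensions get size 1 */
        memcpy(&new_dims[rank_diff], base_dims,
               sizeof(new_dims[0]) * base_rank);
    }
    else {                                   /* projecting down */
        unsigned rank_diff = base_rank - new_rank;
        memcpy(new_dims, &base_dims[rank_diff],
               sizeof(new_dims[0]) * new_rank);
    }
}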
@@ -2037,13 +2037,13 @@ H5S_select_construct_projection(const H5S_t *base_space, H5S_t **new_space_ptr,
*/
if(H5S_SELECT_PROJECT_SIMPLE(base_space, new_space, &projected_space_element_offset) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSET, FAIL, "unable to project simple selection")
-
+
/* If we get this far, we have created the new dataspace, and projected
* the selection in the base dataspace into the new dataspace.
*
- * If the base dataspace is simple, check to see if the
- * offset_changed flag on the base selection has been set -- if so,
- * project the offset into the new dataspace and set the
+ * If the base dataspace is simple, check to see if the
+ * offset_changed flag on the base selection has been set -- if so,
+ * project the offset into the new dataspace and set the
* offset_changed flag.
*/
if(H5S_GET_EXTENT_TYPE(base_space) == H5S_SIMPLE && base_space->select.offset_changed) {
@@ -2075,12 +2075,12 @@ H5S_select_construct_projection(const H5S_t *base_space, H5S_t **new_space_ptr,
* Since we can't do pointer arithmetic on void pointers, we first
* cast buf to a pointer to byte -- i.e. uint8_t.
*
- * We then multiply the projected space element offset we
- * calculated earlier by the supplied element size, add this
- * value to the type cast buf pointer, cast the result back
+ * We then multiply the projected space element offset we
+ * calculated earlier by the supplied element size, add this
+ * value to the type cast buf pointer, cast the result back
* to a pointer to void, and assign the result to *adj_buf_ptr.
*/
- *adj_buf_ptr = (const void *)(((const uint8_t *)buf) +
+ *adj_buf_ptr = (const void *)(((const uint8_t *)buf) +
((size_t)(projected_space_element_offset * element_size)));
} /* end if */
else
@@ -2230,7 +2230,7 @@ done:
src_intersect_space within the selection of src_space as a selection
within the selection of dst_space. The result is placed in the
selection of new_space_ptr.
-
+
GLOBAL VARIABLES
COMMENTS, BUGS, ASSUMPTIONS
EXAMPLES
@@ -2318,7 +2318,7 @@ done:
Removes any and all portions of space that are also present in
subtract_space. In essence, performs an A_NOT_B operation with the
two selections.
-
+
GLOBAL VARIABLES
COMMENTS, BUGS, ASSUMPTIONS
EXAMPLES
diff --git a/src/H5TS.c b/src/H5TS.c
index 10e14d5..adc31dc 100644
--- a/src/H5TS.c
+++ b/src/H5TS.c
@@ -143,7 +143,7 @@ herr_t
H5TS_mutex_lock(H5TS_mutex_t *mutex)
{
#ifdef H5_HAVE_WIN_THREADS
- EnterCriticalSection( &mutex->CriticalSection);
+ EnterCriticalSection( &mutex->CriticalSection);
return 0;
#else /* H5_HAVE_WIN_THREADS */
herr_t ret_value = pthread_mutex_lock(&mutex->atomic_lock);
@@ -164,7 +164,7 @@ H5TS_mutex_lock(H5TS_mutex_t *mutex)
mutex->lock_count = 1;
}
- return pthread_mutex_unlock(&mutex->atomic_lock);
+ return pthread_mutex_unlock(&mutex->atomic_lock);
#endif /* H5_HAVE_WIN_THREADS */
}
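For reference, the pthreads branch above implements a recursive lock by hand: a small mutex guards the owner/count bookkeeping and a condition variable parks waiters. A minimal sketch with assumed field names (the hunk shows only fragments of the real H5TS_mutex_t):

#include <pthread.h>

typedef struct {
    pthread_mutex_t atomic_lock;    /* protects the fields below */
    pthread_cond_t  cond_var;       /* signalled on unlock */
    unsigned        lock_count;     /* 0 means unlocked */
    pthread_t       owner_thread;   /* valid while lock_count > 0 */
} rec_mutex_t;

static int
rec_mutex_lock(rec_mutex_t *m)
{
    int ret = pthread_mutex_lock(&m->atomic_lock);
    if (ret)
        return ret;
    if (m->lock_count && pthread_equal(pthread_self(), m->owner_thread))
        m->lock_count++;            /* already ours: just re-enter */
    else {
        while (m->lock_count)       /* held elsewhere: wait for release */
            pthread_cond_wait(&m->cond_var, &m->atomic_lock);
        m->owner_thread = pthread_self();
        m->lock_count = 1;
    }
    return pthread_mutex_unlock(&m->atomic_lock);
}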
@@ -195,7 +195,7 @@ H5TS_mutex_unlock(H5TS_mutex_t *mutex)
#ifdef H5_HAVE_WIN_THREADS
/* Releases ownership of the specified critical section object. */
LeaveCriticalSection(&mutex->CriticalSection);
- return 0;
+ return 0;
#else /* H5_HAVE_WIN_THREADS */
herr_t ret_value = pthread_mutex_lock(&mutex->atomic_lock);
@@ -214,7 +214,7 @@ H5TS_mutex_unlock(H5TS_mutex_t *mutex)
ret_value = err;
} /* end if */
- return ret_value;
+ return ret_value;
#endif /* H5_HAVE_WIN_THREADS */
} /* H5TS_mutex_unlock */
@@ -251,15 +251,15 @@ H5TS_cancel_count_inc(void)
return SUCCEED;
#else /* H5_HAVE_WIN_THREADS */
H5TS_cancel_t *cancel_counter;
- herr_t ret_value = SUCCEED;
+ herr_t ret_value = SUCCEED;
- cancel_counter = (H5TS_cancel_t *)H5TS_get_thread_local_value(H5TS_cancel_key_g);
+ cancel_counter = (H5TS_cancel_t *)H5TS_get_thread_local_value(H5TS_cancel_key_g);
if (!cancel_counter) {
/*
* First time thread calls library - create new counter and associate
* with key.
- *
+ *
* Don't use H5MM calls here since the destructor has to use HDfree in
* order to avoid codestack calls.
*/
@@ -280,7 +280,7 @@ H5TS_cancel_count_inc(void)
++cancel_counter->cancel_count;
- return ret_value;
+ return ret_value;
#endif /* H5_HAVE_WIN_THREADS */
}
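The counter logic above pairs pthread_setcancelstate() with a per-thread depth count so only the outermost increment changes the cancellation state. A sketch with a hypothetical thread-local key (error handling reduced to -1 returns):

#include <pthread.h>
#include <stdlib.h>

typedef struct {
    unsigned cancel_count;      /* nesting depth on this thread */
    int      previous_state;    /* state to restore at depth 0 */
} cancel_t;

static pthread_key_t cancel_key;    /* assumed created at library init */

static int
cancel_count_inc(void)
{
    cancel_t *c = (cancel_t *)pthread_getspecific(cancel_key);
    if (!c) {   /* first call on this thread: attach a fresh counter */
        if (NULL == (c = (cancel_t *)calloc(1, sizeof(*c))))
            return -1;
        if (pthread_setspecific(cancel_key, c))
            return -1;
    }
    if (c->cancel_count == 0)   /* outermost: disable cancellation */
        if (pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &c->previous_state))
            return -1;
    c->cancel_count++;
    return 0;
}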
@@ -310,19 +310,19 @@ H5TS_cancel_count_inc(void)
herr_t
H5TS_cancel_count_dec(void)
{
-#ifdef H5_HAVE_WIN_THREADS
+#ifdef H5_HAVE_WIN_THREADS
/* unsupported; will just return 0 */
return SUCCEED;
#else /* H5_HAVE_WIN_THREADS */
- register H5TS_cancel_t *cancel_counter;
+ register H5TS_cancel_t *cancel_counter;
herr_t ret_value = SUCCEED;
- cancel_counter = (H5TS_cancel_t *)H5TS_get_thread_local_value(H5TS_cancel_key_g);
+ cancel_counter = (H5TS_cancel_t *)H5TS_get_thread_local_value(H5TS_cancel_key_g);
if (cancel_counter->cancel_count == 1)
ret_value = pthread_setcancelstate(cancel_counter->previous_state, NULL);
- --cancel_counter->cancel_count;
+ --cancel_counter->cancel_count;
return ret_value;
#endif /* H5_HAVE_WIN_THREADS */
@@ -342,7 +342,7 @@ H5TS_cancel_count_dec(void)
*
*--------------------------------------------------------------------------
*/
-H5_DLL BOOL CALLBACK
+H5_DLL BOOL CALLBACK
H5TS_win32_process_enter(PINIT_ONCE InitOnce, PVOID Parameter, PVOID *lpContex)
{
BOOL ret_value = TRUE;
@@ -504,7 +504,7 @@ H5TS_create_thread(void *(*func)(void *), H5TS_attr_t *attr, void *udata)
{
H5TS_thread_t ret_value;
-#ifdef H5_HAVE_WIN_THREADS
+#ifdef H5_HAVE_WIN_THREADS
/* When calling C runtime functions, you should use _beginthread or
* _beginthreadex instead of CreateThread. Threads created with
diff --git a/src/H5Torder.c b/src/H5Torder.c
index ca00555..a6d6421 100644
--- a/src/H5Torder.c
+++ b/src/H5Torder.c
@@ -86,7 +86,7 @@ static herr_t H5T_set_order(H5T_t *dtype, H5T_order_t order);
*
* Programmer: Robb Matzke
* Wednesday, January 7, 1998
- *
+ *
*-------------------------------------------------------------------------
*/
H5T_order_t
@@ -159,7 +159,7 @@ H5T_get_order(const H5T_t *dtype)
if(memb_order != H5T_ORDER_NONE && ret_value == H5T_ORDER_NONE)
ret_value = memb_order;
- /* If the orders are mixed, stop the loop and report it.
+ /* If the orders are mixed, stop the loop and report it.
* (H5T_ORDER_NONE is ignored)
*/
if(memb_order != H5T_ORDER_NONE && ret_value != H5T_ORDER_NONE
@@ -186,7 +186,7 @@ done:
* 2. H5T_ORDER_NONE only works for reference and fixed-length
* string.
* 3. For opaque type, the order will be ignored.
- * 4. For compound type, all restrictions above apply to the
+ * 4. For compound type, all restrictions above apply to the
* members.
*
* Return: Non-negative on success/Negative on failure
@@ -244,12 +244,12 @@ H5T_set_order(H5T_t *dtype, H5T_order_t order)
if(H5T_ENUM == dtype->shared->type && dtype->shared->u.enumer.nmembs > 0)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTSET, FAIL, "operation not allowed after enum members are defined")
- /* For derived data type, defer to parent */
+ /* For derived data type, defer to parent */
while(dtype->shared->parent)
dtype = dtype->shared->parent;
/* Check for setting order on inappropriate datatype */
- if(order == H5T_ORDER_NONE && !(H5T_REFERENCE == dtype->shared->type ||
+ if(order == H5T_ORDER_NONE && !(H5T_REFERENCE == dtype->shared->type ||
H5T_OPAQUE == dtype->shared->type || H5T_IS_FIXED_STRING(dtype->shared)))
HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, FAIL, "illegal byte order for type")
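The "defer to parent" walk above means the order restrictions and the assignment both apply to the root of a derived type's parent chain. A toy sketch with a hypothetical struct (the real H5T_t carries the order inside its shared component):

struct dtype {
    struct dtype *parent;   /* non-NULL for derived datatypes */
    int           order;
};

static void
set_order_on_root(struct dtype *dt, int order)
{
    while (dt->parent)      /* derived type: climb to the base type */
        dt = dt->parent;
    dt->order = order;
}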
diff --git a/src/H5Tvlen.c b/src/H5Tvlen.c
index 6dcb1f9..3cca7c0 100644
--- a/src/H5Tvlen.c
+++ b/src/H5Tvlen.c
@@ -1158,7 +1158,7 @@ done:
* Function: H5T_vlen_reclaim_elmt
*
* Purpose: Alternative method to reclaim any VL data for a buffer element.
- *
+ *
* Return: Non-negative on success/Negative on failure
*
* Programmer: Mike McGreevy
diff --git a/src/H5UC.c b/src/H5UC.c
index 2277818..dd61539 100644
--- a/src/H5UC.c
+++ b/src/H5UC.c
@@ -16,7 +16,7 @@
*
* These are used for various internal buffers which are shared.
*
- * The module used to be H5RC, but changed to H5UC because of
+ * The module used to be H5RC, but changed to H5UC because of
* conflicting requirement for the use of H5RC.
*
*/
diff --git a/src/H5UCprivate.h b/src/H5UCprivate.h
index c451f31..e4916df 100644
--- a/src/H5UCprivate.h
+++ b/src/H5UCprivate.h
@@ -13,7 +13,7 @@
/*
* This file contains private information about the H5UC module
- * The module used to be H5RC, but changed to H5UC because of
+ * The module used to be H5RC, but changed to H5UC because of
* a conflicting requirement for the use of H5RC.
*/
diff --git a/src/H5VM.c b/src/H5VM.c
index 3e57ce8..99b8385 100644
--- a/src/H5VM.c
+++ b/src/H5VM.c
@@ -1226,7 +1226,7 @@ H5VM_chunk_index(unsigned ndims, const hsize_t *coord, const uint32_t *chunk,
/* Defer to H5VM_chunk_index_scaled */
chunk_idx = H5VM_chunk_index_scaled(ndims, coord, chunk, down_nchunks, scaled_coord);
-
+
FUNC_LEAVE_NOAPI(chunk_idx)
} /* end H5VM_chunk_index() */
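H5VM_chunk_index_scaled(), which this function defers to, first scales each coordinate down to a chunk coordinate and then flattens with the "down" chunk counts (products of the faster-changing chunk counts). A sketch of that computation under those assumptions:

#include <stdint.h>

static uint64_t
chunk_index_scaled(unsigned ndims, const uint64_t *coord,
                   const uint32_t *chunk, const uint64_t *down_nchunks)
{
    uint64_t idx = 0;
    for (unsigned u = 0; u < ndims; u++) {
        uint64_t scaled = coord[u] / chunk[u];   /* chunk coordinate */
        idx += scaled * down_nchunks[u];         /* flatten to a linear index */
    }
    return idx;
}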