From c1db3c97c6d00efadc1240c71eedb57cce423fe5 Mon Sep 17 00:00:00 2001 From: Allen Byrne Date: Wed, 13 May 2020 07:15:38 -0500 Subject: Whitespace cleanup --- src/H5.c | 14 +- src/H5A.c | 2 +- src/H5AC.c | 66 +-- src/H5ACdbg.c | 10 +- src/H5ACmpio.c | 256 ++++++------ src/H5ACpkg.h | 42 +- src/H5ACprivate.h | 12 +- src/H5Adense.c | 12 +- src/H5Adeprec.c | 2 +- src/H5B2.c | 6 +- src/H5B2int.c | 12 +- src/H5B2internal.c | 4 +- src/H5B2leaf.c | 2 +- src/H5B2pkg.h | 2 +- src/H5B2test.c | 4 +- src/H5C.c | 412 +++++++++--------- src/H5CX.c | 22 +- src/H5Cdbg.c | 86 ++-- src/H5Cepoch.c | 10 +- src/H5Cimage.c | 538 ++++++++++++------------ src/H5Clog.c | 4 +- src/H5Clog_json.c | 74 ++-- src/H5Clog_trace.c | 38 +- src/H5Cmpio.c | 124 +++--- src/H5Cpkg.h | 306 +++++++------- src/H5Cprefetched.c | 42 +- src/H5Cprivate.h | 556 ++++++++++++------------- src/H5Cquery.c | 4 +- src/H5Ctag.c | 72 ++-- src/H5D.c | 8 +- src/H5Dbtree2.c | 26 +- src/H5Dchunk.c | 1146 +++++++++++++++++++++++++-------------------------- src/H5Dcontig.c | 6 +- src/H5Dearray.c | 6 +- src/H5Dfarray.c | 10 +- src/H5Dfill.c | 2 +- src/H5Dint.c | 2 + src/H5Dio.c | 6 +- src/H5Dmpio.c | 46 +-- src/H5Dnone.c | 8 +- src/H5Dpublic.h | 4 +- src/H5Dsingle.c | 6 +- src/H5E.c | 4 +- src/H5EAcache.c | 8 +- src/H5EApkg.h | 8 +- src/H5EAtest.c | 8 +- src/H5Eint.c | 2 +- src/H5Epkg.h | 2 +- 48 files changed, 2022 insertions(+), 2020 deletions(-) diff --git a/src/H5.c b/src/H5.c index 7d44911..accea68 100644 --- a/src/H5.c +++ b/src/H5.c @@ -138,8 +138,8 @@ H5_init_library(void) if (mpi_initialized && !mpi_finalized) { int key_val; - if(MPI_SUCCESS != (mpi_code = MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, - (MPI_Comm_delete_attr_function *)H5_mpi_delete_cb, + if(MPI_SUCCESS != (mpi_code = MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, + (MPI_Comm_delete_attr_function *)H5_mpi_delete_cb, &key_val, NULL))) HMPI_GOTO_ERROR(FAIL, "MPI_Comm_create_keyval failed", mpi_code) @@ -209,7 +209,7 @@ H5_init_library(void) * The link interface needs to be initialized so that link property lists * have their properties registered. * The FS module needs to be initialized as a result of the fix for HDFFV-10160: - * It might not be initialized during normal file open. + * It might not be initialized during normal file open. * When the application does not close the file, routines in the module might * be called via H5_term_library() when shutting down the file. */ @@ -743,7 +743,7 @@ H5_debug_mask(const char *s) /*------------------------------------------------------------------------- * Function: H5_mpi_delete_cb * - * Purpose: Callback attribute on MPI_COMM_SELF to terminate the HDF5 + * Purpose: Callback attribute on MPI_COMM_SELF to terminate the HDF5 * library when the communicator is destroyed, i.e. on MPI_Finalize. * * Return: MPI_SUCCESS @@ -989,7 +989,7 @@ H5close(void) * Return: * * Success: A pointer to the allocated buffer. - * + * * Failure: NULL * *------------------------------------------------------------------------- @@ -1030,7 +1030,7 @@ H5allocate_memory(size_t size, hbool_t clear) * Return: * * Success: A pointer to the resized buffer. - * + * * Failure: NULL (the input buffer will be unchanged) * *------------------------------------------------------------------------- @@ -1091,7 +1091,7 @@ H5is_library_threadsafe(hbool_t *is_ts) H5TRACE1("e", "*b", is_ts); HDassert(is_ts); - + /* At this time, it is impossible for this to fail. 
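/* ---------------------------------------------------------------------------
 * Illustrative sketch (not part of the patch): the H5_mpi_delete_cb hunk
 * above relies on a standard MPI idiom -- a keyval created with a delete
 * callback and attached as an attribute to MPI_COMM_SELF is guaranteed to
 * have that callback invoked at the start of MPI_Finalize, which is how the
 * library arranges to shut itself down. Minimal standalone version of the
 * pattern; all names below are local to this sketch, not HDF5 internals.
 * ------------------------------------------------------------------------- */
#include <mpi.h>
#include <stdio.h>

static int
finalize_cb(MPI_Comm comm, int keyval, void *attr_val, void *extra_state)
{
    (void)comm; (void)keyval; (void)attr_val; (void)extra_state;
    printf("finalize-time cleanup runs here\n");  /* HDF5 calls H5_term_library() */
    return MPI_SUCCESS;
}

int
main(int argc, char **argv)
{
    int key_val;

    MPI_Init(&argc, &argv);

    /* Create the keyval with a delete callback, hang it on MPI_COMM_SELF */
    MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, finalize_cb, &key_val, NULL);
    MPI_Comm_set_attr(MPI_COMM_SELF, key_val, NULL);
    MPI_Comm_free_keyval(&key_val);   /* attribute (and callback) survive */

    MPI_Finalize();                   /* finalize_cb fires before shutdown */
    return 0;
}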
*/ #ifdef H5_HAVE_THREADSAFE *is_ts = TRUE; diff --git a/src/H5A.c b/src/H5A.c index f99fb7d..2986466 100644 --- a/src/H5A.c +++ b/src/H5A.c @@ -439,7 +439,7 @@ H5Aopen(hid_t loc_id, const char *attr_name, hid_t aapl_id) { void *attr = NULL; /* attr object from VOL connector */ H5VL_object_t *vol_obj = NULL; /* object of loc_id */ - H5VL_loc_params_t loc_params; + H5VL_loc_params_t loc_params; hid_t ret_value = H5I_INVALID_HID; FUNC_ENTER_API(H5I_INVALID_HID) diff --git a/src/H5AC.c b/src/H5AC.c index f6243ad..1756107 100644 --- a/src/H5AC.c +++ b/src/H5AC.c @@ -228,10 +228,10 @@ H5AC_term_package(void) * * Function: H5AC_cache_image_pending() * - * Purpose: Debugging function that tests to see if the load of a + * Purpose: Debugging function that tests to see if the load of a * metadata cache image load is pending (i.e. will be executed * on the next protect or insert) - * + * * Returns TRUE if a cache image load is pending, and FALSE * if not. Throws an assertion failure on error. * @@ -421,8 +421,8 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "auto resize configuration failed") /* Don't need to get the current H5C image config here since the - * cache has just been created, and thus f->shared->cache->image_ctl - * must still set to its initial value (H5C__DEFAULT_CACHE_IMAGE_CTL). + * cache has just been created, and thus f->shared->cache->image_ctl + * must still set to its initial value (H5C__DEFAULT_CACHE_IMAGE_CTL). * Note that this not true as soon as control returns to the application * program, as some test code modifies f->shared->cache->image_ctl. */ @@ -517,10 +517,10 @@ H5AC_dest(H5F_t *f) /* Sanity check */ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); - /* If the file was opened R/W, attempt to flush all entries + /* If the file was opened R/W, attempt to flush all entries * from rank 0 & Bcast clean list to other ranks. * - * Must not flush in the R/O case, as this will trigger the + * Must not flush in the R/O case, as this will trigger the * free space manager settle routines. */ if(H5F_ACC_RDWR & H5F_INTENT(f)) @@ -1963,10 +1963,10 @@ done: * and flag an error and return FAIL otherwise. * * At present, this function operates by packing the data - * from the instance of H5AC_cache_image_config_t into an + * from the instance of H5AC_cache_image_config_t into an * instance of H5C_cache_image_ctl_t, and then calling - * H5C_validate_cache_image_config(). If and when - * H5AC_cache_image_config_t and H5C_cache_image_ctl_t + * H5C_validate_cache_image_config(). If and when + * H5AC_cache_image_config_t and H5C_cache_image_ctl_t * diverge, we may have to change this. * * Return: Non-negative on success/Negative on failure @@ -1992,7 +1992,7 @@ H5AC_validate_cache_image_config(H5AC_cache_image_config_t *config_ptr) HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Unknown image config version") /* don't need to get the current H5C image config here since the - * default values of fields not in the H5AC config will always be + * default values of fields not in the H5AC config will always be * valid. */ internal_config.version = config_ptr->version; @@ -2132,11 +2132,11 @@ done: /*------------------------------------------------------------------------------ * Function: H5AC_ignore_tags() * - * Purpose: Override all assertion frameworks and force application of + * Purpose: Override all assertion frameworks and force application of * global tag everywhere. 
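/* ---------------------------------------------------------------------------
 * Illustrative sketch (not part of the patch): the comment on
 * H5AC_validate_cache_image_config above describes a pack-and-delegate
 * pattern -- the public H5AC config is copied into the internal H5C control
 * struct, padded with defaults, and validated once at the H5C level so the
 * two layers cannot drift apart. Struct and field names below are simplified
 * stand-ins, not the real types.
 * ------------------------------------------------------------------------- */
typedef struct { int version; int generate_image; } ac_img_config_t;         /* "H5AC" side */
typedef struct { int version; int generate_image; int flags; } c_img_ctl_t;  /* "H5C" side  */

#define C_IMG_CTL_DEFAULTS { 1, 0, 0 }  /* fields the AC layer never exposes */

static int
c_validate_ctl(const c_img_ctl_t *ctl)
{
    return (ctl->version == 1) ? 0 : -1;   /* the single source of truth */
}

static int
ac_validate_config(const ac_img_config_t *cfg)
{
    c_img_ctl_t internal = C_IMG_CTL_DEFAULTS;   /* defaults stay valid */

    if (cfg == NULL || cfg->version != 1)
        return -1;

    internal.version        = cfg->version;        /* pack ... */
    internal.generate_image = cfg->generate_image;

    return c_validate_ctl(&internal);              /* ... and delegate */
}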
This should really only be used in the - * tests that need to access functions without going through + * tests that need to access functions without going through * API paths. - * + * * Return: SUCCEED on success, FAIL otherwise. * * Programmer: Mike McGreevy @@ -2159,7 +2159,7 @@ H5AC_ignore_tags(const H5F_t *f) /* Set up a new metadata tag */ if(H5C_ignore_tags(f->shared->cache) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "H5C_ignore_tags() failed") - + done: FUNC_LEAVE_NOAPI(ret_value) } /* H5AC_ignore_tags() */ @@ -2169,7 +2169,7 @@ done: * Function: H5AC_tag() * * Purpose: Sets the metadata tag property in the provided property list. - * + * * Return: void * * Programmer: Mike McGreevy @@ -2197,9 +2197,9 @@ H5AC_tag(haddr_t metadata_tag, haddr_t *prev_tag) * Function: H5AC_retag_copied_metadata() * * Purpose: Searches through cache index for all entries with the - * H5AC__COPIED_TAG, indicating that it was created as a + * H5AC__COPIED_TAG, indicating that it was created as a * result of an object copy, and applies the provided tag. - * + * * Return: SUCCEED on success, FAIL otherwise. * * Programmer: Mike McGreevy @@ -2208,7 +2208,7 @@ H5AC_tag(haddr_t metadata_tag, haddr_t *prev_tag) *------------------------------------------------------------------------------ */ herr_t -H5AC_retag_copied_metadata(const H5F_t *f, haddr_t metadata_tag) +H5AC_retag_copied_metadata(const H5F_t *f, haddr_t metadata_tag) { herr_t ret_value = SUCCEED; /* Return value */ @@ -2217,7 +2217,7 @@ H5AC_retag_copied_metadata(const H5F_t *f, haddr_t metadata_tag) /* Sanity checks */ HDassert(f); HDassert(f->shared); - + /* Call cache-level function to re-tag entries with the COPIED tag */ if(H5C_retag_entries(f->shared->cache, H5AC__COPIED_TAG, metadata_tag) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "Can't retag metadata") @@ -2231,8 +2231,8 @@ done: * Function: H5AC_flush_tagged_metadata() * * Purpose: Wrapper for cache level function which flushes all metadata - * that contains the specific tag. - * + * that contains the specific tag. + * * Return: SUCCEED on success, FAIL otherwise. * * Programmer: Mike McGreevy @@ -2245,8 +2245,8 @@ H5AC_flush_tagged_metadata(H5F_t *f, haddr_t metadata_tag) { /* Variable Declarations */ herr_t ret_value = SUCCEED; - - /* Function Enter Macro */ + + /* Function Enter Macro */ FUNC_ENTER_NOAPI(FAIL) /* Assertions */ @@ -2266,8 +2266,8 @@ done: * Function: H5AC_evict_tagged_metadata() * * Purpose: Wrapper for cache level function which flushes all metadata - * that contains the specific tag. - * + * that contains the specific tag. + * * Return: SUCCEED on success, FAIL otherwise. * * Programmer: Mike McGreevy @@ -2280,8 +2280,8 @@ H5AC_evict_tagged_metadata(H5F_t *f, haddr_t metadata_tag, hbool_t match_global) { /* Variable Declarations */ herr_t ret_value = SUCCEED; - - /* Function Enter Macro */ + + /* Function Enter Macro */ FUNC_ENTER_NOAPI(FAIL) /* Assertions */ @@ -2302,7 +2302,7 @@ done: * * Purpose: Wrapper for cache level function which expunge entries with * a specific tag and type id. - * + * * Return: SUCCEED on success, FAIL otherwise. * * Programmer: Vailin Choi; May 2016 @@ -2314,8 +2314,8 @@ H5AC_expunge_tag_type_metadata(H5F_t *f, haddr_t tag, int type_id, unsigned flag { /* Variable Declarations */ herr_t ret_value = SUCCEED; - - /* Function Enter Macro */ + + /* Function Enter Macro */ FUNC_ENTER_NOAPI(FAIL) /* Assertions */ @@ -2335,7 +2335,7 @@ done: * Function: H5AC_get_tag() * * Purpose: Get the tag for a metadata cache entry. 
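/* ---------------------------------------------------------------------------
 * Illustrative sketch (not part of the patch): H5AC_retag_copied_metadata,
 * shown above, walks the cache index and rewrites every entry carrying the
 * special "copied" tag so that copied objects adopt their destination tag.
 * The walk below runs over a flat array instead of the real hash-table
 * index; the types and the COPIED_TAG value are placeholders.
 * ------------------------------------------------------------------------- */
#include <stddef.h>

typedef unsigned long long haddr_sketch_t;              /* stand-in for haddr_t */
typedef struct { haddr_sketch_t tag; } entry_sketch_t;

#define COPIED_TAG_SKETCH ((haddr_sketch_t)~0ULL - 1)   /* placeholder constant */

static void
retag_copied_entries(entry_sketch_t *entries, size_t n, haddr_sketch_t new_tag)
{
    size_t i;

    for (i = 0; i < n; i++)
        if (entries[i].tag == COPIED_TAG_SKETCH)   /* created by an object copy */
            entries[i].tag = new_tag;              /* adopt the provided tag */
}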
- * + * * Return: SUCCEED/FAIL * * Programmer: Dana Robinson @@ -2348,8 +2348,8 @@ H5AC_get_tag(const void *thing, haddr_t *tag) { /* Variable Declarations */ herr_t ret_value = SUCCEED; - - /* Function Enter Macro */ + + /* Function Enter Macro */ FUNC_ENTER_NOAPI(FAIL) /* Assertions */ diff --git a/src/H5ACdbg.c b/src/H5ACdbg.c index 1235206..b40f8d0 100644 --- a/src/H5ACdbg.c +++ b/src/H5ACdbg.c @@ -253,8 +253,8 @@ H5AC_flush_dependency_exists(H5F_t *f, haddr_t parent_addr, haddr_t child_addr, * type field contains the expected value. * * If the specified entry is in cache, *in_cache_ptr is set - * to TRUE, and *type_ok_ptr is set to TRUE or FALSE depending - * on whether the entries type field matches the + * to TRUE, and *type_ok_ptr is set to TRUE or FALSE depending + * on whether the entries type field matches the * expected_type parameter * * If the target entry is not in cache, *in_cache_ptr is @@ -278,8 +278,8 @@ H5AC_flush_dependency_exists(H5F_t *f, haddr_t parent_addr, haddr_t child_addr, */ #ifndef NDEBUG herr_t -H5AC_verify_entry_type(const H5F_t *f, haddr_t addr, - const H5AC_class_t *expected_type, hbool_t *in_cache_ptr, +H5AC_verify_entry_type(const H5F_t *f, haddr_t addr, + const H5AC_class_t *expected_type, hbool_t *in_cache_ptr, hbool_t *type_ok_ptr) { H5C_t * cache_ptr; @@ -304,7 +304,7 @@ done: /*------------------------------------------------------------------------- * Function: H5AC_get_serialization_in_progress * - * Purpose: Return the current value of + * Purpose: Return the current value of * cache_ptr->serialization_in_progress. * * Return: Current value of cache_ptr->serialization_in_progress. diff --git a/src/H5ACmpio.c b/src/H5ACmpio.c index fadde28..f097e83 100644 --- a/src/H5ACmpio.c +++ b/src/H5ACmpio.c @@ -135,7 +135,7 @@ H5FL_DEFINE_STATIC(H5AC_slist_entry_t); /*------------------------------------------------------------------------- * Function: H5AC__set_sync_point_done_callback * - * Purpose: Set the value of the sync_point_done callback. This + * Purpose: Set the value of the sync_point_done callback. This * callback is used by the parallel test code to verify * that the expected writes and only the expected writes * take place during a sync point. @@ -204,7 +204,7 @@ H5AC__set_write_done_callback(H5C_t * cache_ptr, void (* write_done)(void)) * Function: H5AC_add_candidate() * * Purpose: Add the supplied metadata entry address to the candidate - * list. Verify that each entry added does not appear in + * list. Verify that each entry added does not appear in * the list prior to its insertion. * * This function is intended for used in constructing list @@ -261,11 +261,11 @@ done: * * Purpose: Broadcast the contents of the process 0 candidate entry * slist. In passing, also remove all entries from said - * list. As the application of this will be handled by - * the same functions on all processes, construct and + * list. As the application of this will be handled by + * the same functions on all processes, construct and * return a copy of the list in the same format as that * received by the other processes. Note that if this - * copy is returned in *haddr_buf_ptr_ptr, the caller + * copy is returned in *haddr_buf_ptr_ptr, the caller * must free it. 
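/* ---------------------------------------------------------------------------
 * Illustrative sketch (not part of the patch): H5AC_add_candidate above
 * inserts an entry address into the candidate list and verifies that the
 * address was not already present. The real list is an H5SL skip list keyed
 * on address; a sorted array stands in for it here.
 * ------------------------------------------------------------------------- */
#include <assert.h>
#include <stddef.h>
#include <string.h>

typedef unsigned long long haddr_sketch_t;

static int
add_candidate(haddr_sketch_t *list, size_t *len, size_t cap, haddr_sketch_t addr)
{
    size_t i = 0;

    while (i < *len && list[i] < addr)    /* find the insertion point */
        i++;

    assert(i == *len || list[i] != addr); /* must not appear twice */
    if (*len == cap)
        return -1;                        /* sketch-only capacity limit */

    memmove(&list[i + 1], &list[i], (*len - i) * sizeof(list[0]));
    list[i] = addr;
    (*len)++;
    return 0;
}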
* * This function must only be called by the process with @@ -317,7 +317,7 @@ H5AC__broadcast_candidate_list(H5AC_t *cache_ptr, unsigned *num_entries_ptr, unsigned chk_num_entries = 0; /* convert the candidate list into the format we - * are used to receiving from process 0, and also load it + * are used to receiving from process 0, and also load it * into a buffer for transmission. */ if(H5AC__copy_candidate_list_to_buffer(cache_ptr, &chk_num_entries, &haddr_buf_ptr) < 0) @@ -331,7 +331,7 @@ H5AC__broadcast_candidate_list(H5AC_t *cache_ptr, unsigned *num_entries_ptr, HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result) } /* end if */ - /* Pass the number of entries and the buffer pointer + /* Pass the number of entries and the buffer pointer * back to the caller. Do this so that we can use the same code * to apply the candidate list to all the processes. */ @@ -481,10 +481,10 @@ done: /*------------------------------------------------------------------------- * Function: H5AC__construct_candidate_list() * - * Purpose: In the parallel case when the metadata_write_strategy is + * Purpose: In the parallel case when the metadata_write_strategy is * H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, process 0 uses - * this function to construct the list of cache entries to - * be flushed. This list is then propagated to the other + * this function to construct the list of cache entries to + * be flushed. This list is then propagated to the other * caches, and then flushed in a distributed fashion. * * The sync_point_op parameter is used to determine the extent @@ -581,22 +581,22 @@ H5AC__copy_candidate_list_to_buffer_cb(void *_item, void H5_ATTR_UNUSED *_key, * Function: H5AC__copy_candidate_list_to_buffer * * Purpose: Allocate buffer(s) and copy the contents of the candidate - * entry slist into it (them). In passing, remove all - * entries from the candidate slist. Note that the + * entry slist into it (them). In passing, remove all + * entries from the candidate slist. Note that the * candidate slist must not be empty. * * If MPI_Offset_buf_ptr_ptr is not NULL, allocate a buffer * of MPI_Offset, copy the contents of the candidate - * entry list into it with the appropriate conversions, - * and return the base address of the buffer in + * entry list into it with the appropriate conversions, + * and return the base address of the buffer in * *MPI_Offset_buf_ptr. Note that this is the buffer - * used by process 0 to transmit the list of entries to + * used by process 0 to transmit the list of entries to * be flushed to all other processes (in this file group). * * Similarly, allocate a buffer of haddr_t, load the contents - * of the candidate list into this buffer, and return its - * base address in *haddr_buf_ptr_ptr. Note that this - * latter buffer is constructed unconditionally. + * of the candidate list into this buffer, and return its + * base address in *haddr_buf_ptr_ptr. Note that this + * latter buffer is constructed unconditionally. * * In passing, also remove all entries from the candidate * entry slist. 
@@ -635,8 +635,8 @@ H5AC__copy_candidate_list_to_buffer(const H5AC_t *cache_ptr, unsigned *num_entri num_entries = (unsigned)H5SL_count(aux_ptr->candidate_slist_ptr); - /* allocate a buffer(s) to store the list of candidate entry - * base addresses in + /* allocate a buffer(s) to store the list of candidate entry + * base addresses in */ buf_size = sizeof(haddr_t) * num_entries; if(NULL == (haddr_buf_ptr = (haddr_t *)H5MM_malloc(buf_size))) @@ -651,7 +651,7 @@ H5AC__copy_candidate_list_to_buffer(const H5AC_t *cache_ptr, unsigned *num_entri if(H5SL_free(aux_ptr->candidate_slist_ptr, H5AC__copy_candidate_list_to_buffer_cb, &udata) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "Can't build address list for candidate entries") - /* Pass the number of entries and the buffer pointer + /* Pass the number of entries and the buffer pointer * back to the caller. */ *num_entries_ptr = num_entries; @@ -1131,32 +1131,32 @@ done: /*------------------------------------------------------------------------- * Function: H5AC__propagate_and_apply_candidate_list * - * Purpose: Prior to the addition of support for multiple metadata - * write strategies, in PHDF5, only the metadata cache with - * mpi rank 0 was allowed to write to file. All other - * metadata caches on processes with rank greater than 0 - * were required to retain dirty entries until they were + * Purpose: Prior to the addition of support for multiple metadata + * write strategies, in PHDF5, only the metadata cache with + * mpi rank 0 was allowed to write to file. All other + * metadata caches on processes with rank greater than 0 + * were required to retain dirty entries until they were * notified that the entry was clean. * - * This constraint is relaxed with the distributed + * This constraint is relaxed with the distributed * metadata write strategy, in which a list of candidate * metadata cache entries is constructed by the process 0 * cache and then distributed to the caches of all the other - * processes. Once the listed is distributed, many (if not - * all) processes writing writing a unique subset of the - * entries, and marking the remainder clean. The subsets - * are chosen so that each entry in the list of candidates - * is written by exactly one cache, and all entries are + * processes. Once the listed is distributed, many (if not + * all) processes writing writing a unique subset of the + * entries, and marking the remainder clean. The subsets + * are chosen so that each entry in the list of candidates + * is written by exactly one cache, and all entries are * marked as being clean in all caches. * - * While the list of candidate cache entries is prepared + * While the list of candidate cache entries is prepared * elsewhere, this function is the main routine for distributing - * and applying the list. It must be run simultaniously on + * and applying the list. It must be run simultaniously on * all processes that have the relevant file open. To ensure - * proper synchronization, there is a barrier at the beginning + * proper synchronization, there is a barrier at the beginning * of this function. * - * At present, this function is called under one of two + * At present, this function is called under one of two * circumstances: * * 1) Dirty byte creation exceeds some user specified value. @@ -1167,10 +1167,10 @@ done: * and therefore the same dirty data creation. 
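/* ---------------------------------------------------------------------------
 * Illustrative sketch (not part of the patch): the hunk above drains the
 * candidate skip list into a flat buffer by handing H5SL_free() a callback
 * plus a cursor (udata), copying and emptying in one pass. A singly linked
 * list stands in for the skip list below; all names are local to this sketch.
 * ------------------------------------------------------------------------- */
#include <stddef.h>
#include <stdlib.h>

typedef unsigned long long haddr_sketch_t;
typedef struct node { haddr_sketch_t addr; struct node *next; } node_sketch_t;
typedef struct { haddr_sketch_t *buf; size_t next_idx; } copy_udata_t;

/* callback: record the item's address, then release the item itself */
static void
copy_cb(node_sketch_t *item, copy_udata_t *udata)
{
    udata->buf[udata->next_idx++] = item->addr;
    free(item);
}

static haddr_sketch_t *
drain_list_to_buffer(node_sketch_t **head, size_t num_entries)
{
    copy_udata_t udata;

    if (NULL == (udata.buf = malloc(num_entries * sizeof(haddr_sketch_t))))
        return NULL;
    udata.next_idx = 0;

    while (*head) {   /* "free with callback": the list is empty afterwards */
        node_sketch_t *next = (*head)->next;

        copy_cb(*head, &udata);
        *head = next;
    }
    return udata.buf;
}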
* * This fact is used to synchronize the caches for purposes - * of propagating the list of candidate entries, by simply - * calling this function from all caches whenever some user - * specified threshold on dirty data is exceeded. (the - * process 0 cache creates the candidate list just before + * of propagating the list of candidate entries, by simply + * calling this function from all caches whenever some user + * specified threshold on dirty data is exceeded. (the + * process 0 cache creates the candidate list just before * calling this function). * * 2) Under direct user control -- this operation must be @@ -1185,20 +1185,20 @@ done: * * For the process with mpi rank 0: * - * 1) Load the contents of the candidate list + * 1) Load the contents of the candidate list * (candidate_slist_ptr) into a buffer, and broadcast that * buffer to all the other caches. Clear the candidate * list in passing. * - * If there is a positive number of candidates, proceed with + * If there is a positive number of candidates, proceed with * the following: * * 2) Apply the candidate entry list. * * 3) Particpate in a closing barrier. * - * 4) Remove from the dirty list (d_slist_ptr) and from the - * flushed and still clean entries list (c_slist_ptr), + * 4) Remove from the dirty list (d_slist_ptr) and from the + * flushed and still clean entries list (c_slist_ptr), * all addresses that appeared in the candidate list, as * these entries are now clean. * @@ -1207,7 +1207,7 @@ done: * * 1) Receive the candidate entry list broadcast * - * If there is a positive number of candidates, proceed with + * If there is a positive number of candidates, proceed with * the following: * * 2) Apply the candidate entry list. @@ -1264,8 +1264,8 @@ H5AC__propagate_and_apply_candidate_list(H5F_t *f) if(num_candidates > 0) { herr_t result; - /* all processes apply the candidate list. - * H5C_apply_candidate_list() handles the details of + /* all processes apply the candidate list. + * H5C_apply_candidate_list() handles the details of * distributing the writes across the processes. */ @@ -1284,7 +1284,7 @@ H5AC__propagate_and_apply_candidate_list(H5F_t *f) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't apply candidate list.") /* this code exists primarily for the test bed -- it allows us to - * enforce posix semantics on the server that pretends to be a + * enforce posix semantics on the server that pretends to be a * file system in our parallel tests. */ if(aux_ptr->write_done) @@ -1323,16 +1323,16 @@ done: * Function: H5AC__propagate_flushed_and_still_clean_entries_list * * Purpose: In PHDF5, if the process 0 only metadata write strategy - * is selected, only the metadata cache with mpi rank 0 is - * allowed to write to file. All other metadata caches on - * processes with rank greater than 0 must retain dirty - * entries until they are notified that the entry is now + * is selected, only the metadata cache with mpi rank 0 is + * allowed to write to file. All other metadata caches on + * processes with rank greater than 0 must retain dirty + * entries until they are notified that the entry is now * clean. * - * This function is the main routine for handling this - * notification procedure. It must be called - * simultaniously on all processes that have the relevant - * file open. To this end, it is called only during a + * This function is the main routine for handling this + * notification procedure. It must be called + * simultaniously on all processes that have the relevant + * file open. 
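/* ---------------------------------------------------------------------------
 * Illustrative sketch (not part of the patch): the strategy described above
 * requires every rank to assign each candidate to exactly one writer without
 * extra communication. A deterministic contiguous partition of the shared
 * list achieves this -- all ranks compute identical slices, flush their own,
 * and mark the rest clean. The exact split below is illustrative, not
 * necessarily the one H5C_apply_candidate_list uses.
 * ------------------------------------------------------------------------- */
static void
my_candidate_slice(unsigned num_candidates, int mpi_rank, int mpi_size,
                   unsigned *first, unsigned *count)
{
    unsigned per_rank = num_candidates / (unsigned)mpi_size;
    unsigned leftover = num_candidates % (unsigned)mpi_size;

    if ((unsigned)mpi_rank < leftover) {   /* first ranks take one extra */
        *count = per_rank + 1;
        *first = (unsigned)mpi_rank * (per_rank + 1);
    }
    else {
        *count = per_rank;
        *first = leftover * (per_rank + 1) +
                 ((unsigned)mpi_rank - leftover) * per_rank;
    }
}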
To this end, it is called only during a * sync point, with a barrier prior to the call. * * Note that any metadata entry writes by process 0 will @@ -1428,7 +1428,7 @@ done: * * Purpose: Receive the list of entry addresses from process 0, * and return it in a buffer pointed to by *haddr_buf_ptr_ptr. - * Note that the caller must free this buffer if it is + * Note that the caller must free this buffer if it is * returned. * * This function must only be called by the process with @@ -1479,7 +1479,7 @@ H5AC__receive_haddr_list(MPI_Comm mpi_comm, unsigned *num_entries_ptr, HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result) } /* end if */ - /* finally, pass the number of entries and the buffer pointer + /* finally, pass the number of entries and the buffer pointer * back to the caller. */ *num_entries_ptr = num_entries; @@ -1562,7 +1562,7 @@ done: * * Purpose: Receive the list of candidate entries from process 0, * and return it in a buffer pointed to by *haddr_buf_ptr_ptr. - * Note that the caller must free this buffer if it is + * Note that the caller must free this buffer if it is * returned. * * This function must only be called by the process with @@ -1612,38 +1612,38 @@ done: * Purpose: Routine for handling the details of running a sync point * that is triggered by a flush -- which in turn must have been * triggered by either a flush API call or a file close -- - * when the distributed metadata write strategy is selected. - * - * Upon entry, each process generates it own candidate list, - * being a sorted list of all dirty metadata entries currently - * in the metadata cache. Note that this list must be idendical - * across all processes, as all processes see the same stream - * of dirty metadata coming in, and use the same lists of - * candidate entries at each sync point. (At first glance, this + * when the distributed metadata write strategy is selected. + * + * Upon entry, each process generates it own candidate list, + * being a sorted list of all dirty metadata entries currently + * in the metadata cache. Note that this list must be idendical + * across all processes, as all processes see the same stream + * of dirty metadata coming in, and use the same lists of + * candidate entries at each sync point. (At first glance, this * argument sounds circular, but think of it in the sense of * a recursive proof). * - * If this this list is empty, we are done, and the function + * If this this list is empty, we are done, and the function * returns * - * Otherwise, after the sorted list dirty metadata entries is - * constructed, each process uses the same algorithm to assign - * each entry on the candidate list to exactly one process for + * Otherwise, after the sorted list dirty metadata entries is + * constructed, each process uses the same algorithm to assign + * each entry on the candidate list to exactly one process for * flushing. * * At this point, all processes participate in a barrier to * avoid messages from the past/future bugs. * - * Each process then flushes the entries assigned to it, and + * Each process then flushes the entries assigned to it, and * marks all other entries on the candidate list as clean. * - * Finally, all processes participate in a second barrier to + * Finally, all processes participate in a second barrier to * avoid messages from the past/future bugs. * * At the end of this process, process 0 and only process 0 - * must tidy up its lists of dirtied and cleaned entries. + * must tidy up its lists of dirtied and cleaned entries. 
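/* ---------------------------------------------------------------------------
 * Illustrative sketch (not part of the patch): under the process-0-only
 * strategy described above, ranks > 0 retain dirty entries until rank 0
 * reports them clean. The receiving side reduces to a loop over the
 * broadcast address list, clearing the dirty flag of each matching retained
 * entry. find_entry() is a hypothetical index lookup, not a real HDF5 call.
 * ------------------------------------------------------------------------- */
typedef unsigned long long haddr_sketch_t;
typedef struct { haddr_sketch_t addr; int is_dirty; } entry_sketch_t;

extern entry_sketch_t *find_entry(haddr_sketch_t addr);   /* hypothetical */

static void
mark_entries_clean(const haddr_sketch_t *addrs, unsigned num_entries)
{
    unsigned u;

    for (u = 0; u < num_entries; u++) {
        entry_sketch_t *entry = find_entry(addrs[u]);

        if (entry != NULL)          /* defensive: skip unknown addresses */
            entry->is_dirty = 0;    /* rank 0 wrote it; safe to evict now */
    }
}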
* These lists are not used in the distributed metadata write - * strategy, but they must be maintained should we shift + * strategy, but they must be maintained should we shift * to a strategy that uses them. * * Return: Success: non-negative @@ -1676,7 +1676,7 @@ H5AC__rsp__dist_md_write__flush(H5F_t *f) HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED); - /* first construct the candidate list -- initially, this will be in the + /* first construct the candidate list -- initially, this will be in the * form of a skip list. We will convert it later. */ if(H5C_construct_candidate_list__clean_cache(cache_ptr) < 0) @@ -1692,7 +1692,7 @@ H5AC__rsp__dist_md_write__flush(H5F_t *f) HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't construct candidate buffer.") /* Initial sync point barrier - * + * * When flushing from within the close operation from a file, * it's possible to skip this barrier (on the second flush of the cache). */ @@ -1715,7 +1715,7 @@ H5AC__rsp__dist_md_write__flush(H5F_t *f) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't apply candidate list.") /* this code exists primarily for the test bed -- it allows us to - * enforce posix semantics on the server that pretends to be a + * enforce posix semantics on the server that pretends to be a * file system in our parallel tests. */ if(aux_ptr->write_done) @@ -1752,45 +1752,45 @@ done: * Function: H5AC__rsp__dist_md_write__flush_to_min_clean * * Purpose: Routine for handling the details of running a sync point - * triggered by the accumulation of dirty metadata (as + * triggered by the accumulation of dirty metadata (as * opposed to a flush call to the API) when the distributed * metadata write strategy is selected. * * After invocation and initial sanity checking this function - * first checks to see if evictions are enabled -- if they + * first checks to see if evictions are enabled -- if they * are not, the function does nothing and returns. * - * Otherwise, process zero constructs a list of entries to + * Otherwise, process zero constructs a list of entries to * be flushed in order to bring the process zero cache back - * within its min clean requirement. Note that this list + * within its min clean requirement. Note that this list * (the candidate list) may be empty. * * Then, all processes participate in a barrier. * - * After the barrier, process 0 broadcasts the number of - * entries in the candidate list prepared above, and all + * After the barrier, process 0 broadcasts the number of + * entries in the candidate list prepared above, and all * other processes receive this number. * * If this number is zero, we are done, and the function * returns without further action. * - * Otherwise, process 0 broadcasts the sorted list of + * Otherwise, process 0 broadcasts the sorted list of * candidate entries, and all other processes receive it. * - * Then, each process uses the same algorithm to assign - * each entry on the candidate list to exactly one process + * Then, each process uses the same algorithm to assign + * each entry on the candidate list to exactly one process * for flushing. * - * Each process then flushes the entries assigned to it, and + * Each process then flushes the entries assigned to it, and * marks all other entries on the candidate list as clean. * - * Finally, all processes participate in a second barrier to + * Finally, all processes participate in a second barrier to * avoid messages from the past/future bugs. 
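/* ---------------------------------------------------------------------------
 * Illustrative sketch (not part of the patch): the sync point narrated above
 * has a fixed shape -- identical candidate construction on every rank, an
 * opening barrier, a distributed apply, a closing barrier, then rank-0-only
 * bookkeeping. The helpers here are stubs standing in for the real
 * H5AC/H5C calls.
 * ------------------------------------------------------------------------- */
#include <mpi.h>

static int  construct_candidate_list(void) { return 0; }   /* stub */
static int  apply_candidate_list(void)     { return 0; }   /* stub */
static void tidy_rank0_lists_stub(void)    { }             /* stub */

static int
dist_md_write_flush_sync_point(MPI_Comm comm, int mpi_rank)
{
    /* 1) every rank builds the same sorted list of dirty entries */
    if (construct_candidate_list() < 0)
        return -1;

    /* 2) opening barrier: no messages from the past */
    if (MPI_SUCCESS != MPI_Barrier(comm))
        return -1;

    /* 3) each rank flushes its assigned slice, marks the rest clean */
    if (apply_candidate_list() < 0)
        return -1;

    /* 4) closing barrier: no messages from the future */
    if (MPI_SUCCESS != MPI_Barrier(comm))
        return -1;

    /* 5) only rank 0 maintains the dirtied / still-clean lists */
    if (mpi_rank == 0)
        tidy_rank0_lists_stub();

    return 0;
}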
* * At the end of this process, process 0 and only process 0 - * must tidy up its lists of dirtied and cleaned entries. + * must tidy up its lists of dirtied and cleaned entries. * These lists are not used in the distributed metadata write - * strategy, but they must be maintained should we shift + * strategy, but they must be maintained should we shift * to a strategy that uses them. * * Return: Success: non-negative @@ -1846,25 +1846,25 @@ done: * * Purpose: Routine for handling the details of running a sync point * that is triggered a flush -- which in turn must have been - * triggered by either a flush API call or a file close -- - * when the process 0 only metadata write strategy is selected. + * triggered by either a flush API call or a file close -- + * when the process 0 only metadata write strategy is selected. * * First, all processes participate in a barrier. * * Then process zero flushes all dirty entries, and broadcasts - * they number of clean entries (if any) to all the other + * they number of clean entries (if any) to all the other * caches. * * If this number is zero, we are done. * - * Otherwise, process 0 broadcasts the list of cleaned + * Otherwise, process 0 broadcasts the list of cleaned * entries, and all other processes which are part of this * file group receive it, and mark the listed entries as * clean in their caches. * - * Since all processes have the same set of dirty + * Since all processes have the same set of dirty * entries at the beginning of the sync point, and all - * entries that will be written are written before + * entries that will be written are written before * process zero broadcasts the number of cleaned entries, * there is no need for a closing barrier. * @@ -1896,8 +1896,8 @@ H5AC__rsp__p0_only__flush(H5F_t *f) HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY); - /* To prevent "messages from the future" we must - * synchronize all processes before we start the flush. + /* To prevent "messages from the future" we must + * synchronize all processes before we start the flush. * Hence the following barrier. * * However, when flushing from within the close operation from a file, @@ -1925,7 +1925,7 @@ H5AC__rsp__p0_only__flush(H5F_t *f) HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush.") /* this code exists primarily for the test bed -- it allows us to - * enforce POSIX semantics on the server that pretends to be a + * enforce POSIX semantics on the server that pretends to be a * file system in our parallel tests. */ if(aux_ptr->write_done) @@ -1945,32 +1945,32 @@ done: * Function: H5AC__rsp__p0_only__flush_to_min_clean * * Purpose: Routine for handling the details of running a sync point - * triggered by the accumulation of dirty metadata (as + * triggered by the accumulation of dirty metadata (as * opposed to a flush call to the API) when the process 0 * only metadata write strategy is selected. * * After invocation and initial sanity checking this function - * first checks to see if evictions are enabled -- if they + * first checks to see if evictions are enabled -- if they * are not, the function does nothing and returns. * * Otherwise, all processes participate in a barrier. 
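/* ---------------------------------------------------------------------------
 * Illustrative sketch (not part of the patch): the candidate list for the
 * min-clean sync point above is sized by a simple shortfall computation --
 * how many more clean bytes the cache needs before it again meets its
 * configured minimum. Field names are illustrative.
 * ------------------------------------------------------------------------- */
#include <stddef.h>

static size_t
min_clean_shortfall(size_t min_clean_size, size_t clean_index_size)
{
    if (clean_index_size >= min_clean_size)
        return 0;                                 /* already compliant */
    return min_clean_size - clean_index_size;     /* dirty bytes to flush */
}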
* - * After the barrier, if this is process 0, the function - * causes the cache to flush sufficient entries to get the - * cache back within its minimum clean fraction, and broadcast - * the number of entries which have been flushed since + * After the barrier, if this is process 0, the function + * causes the cache to flush sufficient entries to get the + * cache back within its minimum clean fraction, and broadcast + * the number of entries which have been flushed since * the last sync point, and are still clean. * * If this number is zero, we are done. * - * Otherwise, process 0 broadcasts the list of cleaned + * Otherwise, process 0 broadcasts the list of cleaned * entries, and all other processes which are part of this * file group receive it, and mark the listed entries as * clean in their caches. * - * Since all processes have the same set of dirty + * Since all processes have the same set of dirty * entries at the beginning of the sync point, and all - * entries that will be written are written before + * entries that will be written are written before * process zero broadcasts the number of cleaned entries, * there is no need for a closing barrier. * @@ -2026,9 +2026,9 @@ H5AC__rsp__p0_only__flush_to_min_clean(H5F_t *f) if(0 == aux_ptr->mpi_rank) { herr_t result; - /* here, process 0 flushes as many entries as necessary to + /* here, process 0 flushes as many entries as necessary to * comply with the currently specified min clean size. - * Note that it is quite possible that no entries will be + * Note that it is quite possible that no entries will be * flushed. */ @@ -2066,23 +2066,23 @@ done: * Function: H5AC__run_sync_point * * Purpose: Top level routine for managing a sync point between all - * meta data caches in the parallel case. Since all caches + * meta data caches in the parallel case. Since all caches * see the same sequence of dirty metadata, we simply count * bytes of dirty metadata, and run a sync point whenever the * number of dirty bytes of metadata seen since the last * sync point exceeds a threshold that is common across all - * processes. We also run sync points in response to + * processes. We also run sync points in response to * HDF5 API calls triggering either a flush or a file close. * - * In earlier versions of PHDF5, only the metadata cache with - * mpi rank 0 was allowed to write to file. All other + * In earlier versions of PHDF5, only the metadata cache with + * mpi rank 0 was allowed to write to file. All other * metadata caches on processes with rank greater than 0 were - * required to retain dirty entries until they were notified + * required to retain dirty entries until they were notified * that the entry is was clean. * - * This function was created to make it easier for us to - * experiment with other options, as it is a single point - * for the execution of sync points. + * This function was created to make it easier for us to + * experiment with other options, as it is a single point + * for the execution of sync points. * * Return: Success: non-negative * @@ -2197,13 +2197,13 @@ done: * * Purpose: In the distributed metadata write strategy, not all dirty * entries are written by process 0 -- thus we must tidy - * up the dirtied, and flushed and still clean lists + * up the dirtied, and flushed and still clean lists * maintained by process zero after each sync point. * * This procedure exists to tend to this issue. 
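/* ---------------------------------------------------------------------------
 * Illustrative sketch (not part of the patch): the trigger described above
 * works because every rank sees the same stream of dirty metadata, so a
 * plain per-rank byte counter crosses the shared threshold on all ranks at
 * the same operation. Types and names are local to this sketch.
 * ------------------------------------------------------------------------- */
#include <stddef.h>

typedef struct {
    size_t dirty_bytes;             /* accumulated since the last sync point */
    size_t dirty_bytes_threshold;   /* common across all ranks */
} aux_sketch_counter_t;

static int
note_entry_dirtied(aux_sketch_counter_t *aux, size_t entry_size,
                   int (*run_sync_point)(aux_sketch_counter_t *))
{
    aux->dirty_bytes += entry_size;

    if (aux->dirty_bytes >= aux->dirty_bytes_threshold) {
        if (run_sync_point(aux) < 0)   /* runs on all ranks at the same point */
            return -1;
        aux->dirty_bytes = 0;          /* reset after the sync point */
    }
    return 0;
}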
* * At this point, all entries that process 0 cleared should - * have been removed from both the dirty and flushed and + * have been removed from both the dirty and flushed and * still clean lists, and entries that process 0 has flushed * should have been removed from the dirtied list and added * to the flushed and still clean list. @@ -2214,7 +2214,7 @@ done: * them to be used should the metadata write strategy change * to one that uses these lists. * - * Thus for our purposes, all we need to do is remove from + * Thus for our purposes, all we need to do is remove from * the dirtied and flushed and still clean lists all * references to entries that appear in the candidate list. * @@ -2246,11 +2246,11 @@ H5AC__tidy_cache_0_lists(H5AC_t *cache_ptr, unsigned num_candidates, HDassert(num_candidates > 0); HDassert(candidates_list_ptr != NULL); - /* clean up dirtied and flushed and still clean lists by removing - * all entries on the candidate list. Cleared entries should - * have been removed from both the dirty and cleaned lists at - * this point, flushed entries should have been added to the - * cleaned list. However, for this metadata write strategy, + /* clean up dirtied and flushed and still clean lists by removing + * all entries on the candidate list. Cleared entries should + * have been removed from both the dirty and cleaned lists at + * this point, flushed entries should have been added to the + * cleaned list. However, for this metadata write strategy, * we just want to remove all references to the candidate entries. */ for(u = 0; u < num_candidates; u++) { @@ -2260,7 +2260,7 @@ H5AC__tidy_cache_0_lists(H5AC_t *cache_ptr, unsigned num_candidates, addr = candidates_list_ptr[u]; - /* addr may be either on the dirtied list, or on the flushed + /* addr may be either on the dirtied list, or on the flushed * and still clean list. Remove it. */ if(NULL != (d_slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(aux_ptr->d_slist_ptr, (void *)&addr))) diff --git a/src/H5ACpkg.h b/src/H5ACpkg.h index 8997382..86ff385 100644 --- a/src/H5ACpkg.h +++ b/src/H5ACpkg.h @@ -153,23 +153,23 @@ H5FL_EXTERN(H5AC_aux_t); * * Update: When the above was written, I planned to allow the process * 0 metadata cache to write dirty metadata between sync points. - * However, testing indicated that this allowed occasional + * However, testing indicated that this allowed occasional * messages from the future to reach the caches on other processes. * * To resolve this, the code was altered to require that all metadata * writes take place during sync points -- which solved the problem. - * Initially all writes were performed by the process 0 cache. This + * Initially all writes were performed by the process 0 cache. This * approach was later replaced with a distributed write approach - * in which each process writes a subset of the metadata to be - * written. - * - * After thinking on the matter for a while, I arrived at the - * conclusion that the process 0 cache could be allowed to write - * dirty metadata between sync points if it restricted itself to - * entries that had been dirty at the time of the previous sync point. - * + * in which each process writes a subset of the metadata to be + * written. + * + * After thinking on the matter for a while, I arrived at the + * conclusion that the process 0 cache could be allowed to write + * dirty metadata between sync points if it restricted itself to + * entries that had been dirty at the time of the previous sync point. 
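/* ---------------------------------------------------------------------------
 * Illustrative sketch (not part of the patch): the tidy pass described above
 * only needs to purge every candidate address from both rank-0 bookkeeping
 * lists -- an address may sit on the dirtied list or on the flushed-and-
 * still-clean list, so both are checked. remove_if_present() is a
 * hypothetical stand-in for H5SL_remove() plus the entry free.
 * ------------------------------------------------------------------------- */
typedef unsigned long long haddr_sketch_t;

extern int remove_if_present(void *slist, haddr_sketch_t addr);  /* hypothetical */

static void
tidy_rank0_lists(void *dirtied_slist, void *still_clean_slist,
                 const haddr_sketch_t *candidates, unsigned num_candidates)
{
    unsigned u;

    for (u = 0; u < num_candidates; u++) {
        (void)remove_if_present(dirtied_slist, candidates[u]);
        (void)remove_if_present(still_clean_slist, candidates[u]);
    }
}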
+ * * To date, there has been no attempt to implement this optimization. - * However, should it be attempted, much of the supporting code + * However, should it be attempted, much of the supporting code * should still be around. * * JRM -- 1/6/15 @@ -206,14 +206,14 @@ H5FL_EXTERN(H5AC_aux_t); * broadcast. This field is reset to zero after each such * broadcast. * - * metadata_write_strategy: Integer code indicating how we will be - * writing the metadata. In the first incarnation of + * metadata_write_strategy: Integer code indicating how we will be + * writing the metadata. In the first incarnation of * this code, all writes were done from process 0. This - * field exists to facilitate experiments with other + * field exists to facilitate experiments with other * strategies. * * At present, this field must be set to either - * H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY or + * H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY or * H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED. * * dirty_bytes_propagations: This field only exists when the @@ -267,7 +267,7 @@ H5FL_EXTERN(H5AC_aux_t); * * Things have changed a bit since the following four fields were defined. * If metadata_write_strategy is H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, - * all comments hold as before -- with the caviate that pending further + * all comments hold as before -- with the caviate that pending further * coding, the process 0 metadata cache is forbidden to flush entries outside * of a sync point. * @@ -337,16 +337,16 @@ H5FL_EXTERN(H5AC_aux_t); * needed. * * Note: This field has been extended for use by all processes - * with the addition of support for the distributed - * metadata write strategy. + * with the addition of support for the distributed + * metadata write strategy. * JRM -- 5/9/10 * * sync_point_done: In the parallel test bed, it is necessary to verify * that the expected writes, and only the expected writes, * have taken place at the end of each sync point. * - * The sync_point_done callback allows t_cache to perform - * this verification. The field is set to NULL when the + * The sync_point_done callback allows t_cache to perform + * this verification. The field is set to NULL when the * callback is not needed. * * The following field supports the metadata cache image feature. @@ -402,7 +402,7 @@ typedef struct H5AC_aux_t void (* write_done)(void); - void (* sync_point_done)(unsigned num_writes, + void (* sync_point_done)(unsigned num_writes, haddr_t * written_entries_tbl); unsigned p0_image_len; diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h index 691f7c6..b932e16 100644 --- a/src/H5ACprivate.h +++ b/src/H5ACprivate.h @@ -88,18 +88,18 @@ typedef enum { * * Hence the following, somewhat odd set of #defines. * - * NOTE: test/cache plays games with the f->shared->cache, and thus - * setting H5AC_DUMP_STATS_ON_CLOSE will generate constant, - * irrelevant data when run with that test program. See + * NOTE: test/cache plays games with the f->shared->cache, and thus + * setting H5AC_DUMP_STATS_ON_CLOSE will generate constant, + * irrelevant data when run with that test program. See * comments on setup_cache() / takedown_cache() in test/cache_common.c. * for details. * * If you need to dump stats at file close in test/cache.c, - * use the dump_stats parameter to takedown_cache(), or call + * use the dump_stats parameter to takedown_cache(), or call * H5C_stats() directly. 
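/* ---------------------------------------------------------------------------
 * Illustrative sketch (not part of the patch): a trimmed view of the
 * H5AC_aux_t bookkeeping fields documented in this header comment. The real
 * struct has many more members; names and types below are simplified.
 * ------------------------------------------------------------------------- */
typedef unsigned long long haddr_sketch_t;

typedef struct aux_sketch_t {
    unsigned dirty_bytes;              /* dirty metadata created since the
                                        * last sync point; reset afterwards */
    int      metadata_write_strategy;  /* process-0-only or distributed */
    void   (*write_done)(void);        /* test-bed hook: writes completed */
    void   (*sync_point_done)(unsigned num_writes,
                              haddr_sketch_t *written_entries_tbl);
                                       /* test-bed hook: verify the writes */
} aux_sketch_t;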
* JRM -- 4/12/15 * - * Added the H5AC_DUMP_IMAGE_STATS_ON_CLOSE #define, which works much + * Added the H5AC_DUMP_IMAGE_STATS_ON_CLOSE #define, which works much * the same way as H5AC_DUMP_STATS_ON_CLOSE. However, the set of stats * displayed is much smaller, and directed purely at the cache image feature. * @@ -419,7 +419,7 @@ H5_DLL herr_t H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr, H5_DLL herr_t H5AC_validate_config(H5AC_cache_config_t *config_ptr); /* Cache image routines */ -H5_DLL herr_t H5AC_load_cache_image_on_next_protect(H5F_t *f, haddr_t addr, +H5_DLL herr_t H5AC_load_cache_image_on_next_protect(H5F_t *f, haddr_t addr, hsize_t len, hbool_t rw); H5_DLL herr_t H5AC_validate_cache_image_config(H5AC_cache_image_config_t *config_ptr); H5_DLL hbool_t H5AC_cache_image_pending(const H5F_t *f); diff --git a/src/H5Adense.c b/src/H5Adense.c index 7491dd2..a73fcc6 100644 --- a/src/H5Adense.c +++ b/src/H5Adense.c @@ -310,17 +310,17 @@ H5A__dense_fnd_cb(const H5A_t *attr, hbool_t *took_ownership, void *_user_attr) HDassert(took_ownership); /* - * If there is an attribute already stored in "user_attr", - * we need to free the dynamially allocated spaces for the - * attribute, otherwise we got infinite loop closing library due to + * If there is an attribute already stored in "user_attr", + * we need to free the dynamially allocated spaces for the + * attribute, otherwise we got infinite loop closing library due to * outstanding allocation. (HDFFV-10659) * * This callback is used by H5A__dense_remove() to close/free the * attribute stored in "user_attr" (via H5O__msg_free_real()) after * the attribute node is deleted from the name index v2 B-tree. - * The issue is: - * When deleting the attribute node from the B-tree, - * if the attribute is found in the intermediate B-tree nodes, + * The issue is: + * When deleting the attribute node from the B-tree, + * if the attribute is found in the intermediate B-tree nodes, * which may be merged/redistributed, we need to free the dynamically * allocated spaces for the intermediate decoded attribute. 
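/* ---------------------------------------------------------------------------
 * Illustrative sketch (not part of the patch): the H5A__dense_fnd_cb comment
 * above describes a "replace and free" ownership pattern -- if the callback
 * already holds an attribute from an earlier invocation, that copy must be
 * freed before the new one is stored, or the library shuts down with an
 * outstanding allocation (HDFFV-10659). Types below are illustrative.
 * ------------------------------------------------------------------------- */
#include <stdlib.h>

typedef struct { char *name; } attr_sketch_t;

static void
attr_free(attr_sketch_t *attr)
{
    free(attr->name);
    free(attr);
}

static int
found_cb(attr_sketch_t *attr, int *took_ownership, void *_user_attr)
{
    attr_sketch_t **user_attr = (attr_sketch_t **)_user_attr;

    if (*user_attr != NULL)    /* free the previously stored copy first */
        attr_free(*user_attr);

    *user_attr      = attr;    /* keep this one ... */
    *took_ownership = 1;       /* ... and tell the caller not to free it */
    return 0;
}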
*/ diff --git a/src/H5Adeprec.c b/src/H5Adeprec.c index e0a0f55..dd5eae3 100644 --- a/src/H5Adeprec.c +++ b/src/H5Adeprec.c @@ -192,7 +192,7 @@ H5Aopen_name(hid_t loc_id, const char *name) { void *attr = NULL; /* attr object from VOL connector */ H5VL_object_t *vol_obj = NULL; /* Object of loc_id */ - H5VL_loc_params_t loc_params; + H5VL_loc_params_t loc_params; hid_t ret_value = H5I_INVALID_HID; /* Return value */ FUNC_ENTER_API(H5I_INVALID_HID) diff --git a/src/H5B2.c b/src/H5B2.c index 6b0d7a3..c9ed303 100644 --- a/src/H5B2.c +++ b/src/H5B2.c @@ -507,7 +507,7 @@ H5B2_find(H5B2_t *bt2, void *udata, H5B2_found_t op, void *op_data) if((hdr->cls->compare)(udata, hdr->min_native_rec, &cmp) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") if(cmp < 0) - HGOTO_DONE(FALSE) /* Less than the least record--not found */ + HGOTO_DONE(FALSE) /* Less than the least record--not found */ else if(cmp == 0) { /* Record is found */ if(op && (op)(hdr->min_native_rec, op_data) < 0) HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "'found' callback failed for B-tree find operation") @@ -518,7 +518,7 @@ H5B2_find(H5B2_t *bt2, void *udata, H5B2_found_t op, void *op_data) if((hdr->cls->compare)(udata, hdr->max_native_rec, &cmp) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") if(cmp > 0) - HGOTO_DONE(FALSE) /* Less than the least record--not found */ + HGOTO_DONE(FALSE) /* Less than the least record--not found */ else if(cmp == 0) { /* Record is found */ if(op && (op)(hdr->max_native_rec, op_data) < 0) HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "'found' callback failed for B-tree find operation") @@ -1601,7 +1601,7 @@ done: * Function: H5B2_patch_file * * Purpose: Patch the top-level file pointer contained in bt2 - * to point to idx_info->f if they are different. + * to point to idx_info->f if they are different. * This is possible because the file pointer in bt2 can be * closed out if bt2 remains open. 
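/* ---------------------------------------------------------------------------
 * Illustrative sketch (not part of the patch): the H5B2_find hunk above
 * short-circuits the search by first comparing the key against the cached
 * least and greatest native records -- keys outside that range are reported
 * absent without touching the tree. compare() follows strcmp conventions;
 * types are illustrative.
 * ------------------------------------------------------------------------- */
typedef struct {
    const void *min_rec;   /* cached least record, or NULL */
    const void *max_rec;   /* cached greatest record, or NULL */
} btree_sketch_t;

/* returns 1 if found, 0 if provably absent, -1 to request a full descent */
static int
check_min_max(const btree_sketch_t *bt, const void *key,
              int (*compare)(const void *key, const void *rec))
{
    int cmp;

    if (bt->min_rec != NULL) {
        cmp = compare(key, bt->min_rec);
        if (cmp < 0)
            return 0;    /* less than the least record: not found */
        if (cmp == 0)
            return 1;    /* found without any traversal */
    }
    if (bt->max_rec != NULL) {
        cmp = compare(key, bt->max_rec);
        if (cmp > 0)
            return 0;    /* greater than the greatest record: not found */
        if (cmp == 0)
            return 1;
    }
    return -1;           /* inside the range: fall back to the full search */
}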
* diff --git a/src/H5B2int.c b/src/H5B2int.c index ea03ed6..53ac835 100644 --- a/src/H5B2int.c +++ b/src/H5B2int.c @@ -57,7 +57,7 @@ /********************/ /* Local Prototypes */ /********************/ -static herr_t H5B2__update_child_flush_depends(H5B2_hdr_t *hdr, +static herr_t H5B2__update_child_flush_depends(H5B2_hdr_t *hdr, unsigned depth, const H5B2_node_ptr_t *node_ptrs, unsigned start_idx, unsigned end_idx, void *old_parent, void *new_parent); @@ -1757,7 +1757,7 @@ H5B2__create_flush_depend(H5AC_info_t *parent_entry, H5AC_info_t *child_entry) herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT - + /* Sanity check */ HDassert(parent_entry); HDassert(child_entry); @@ -1794,7 +1794,7 @@ H5B2__update_flush_depend(H5B2_hdr_t *hdr, unsigned depth, herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE - + /* Sanity checks */ HDassert(hdr); HDassert(depth > 0); @@ -1884,14 +1884,14 @@ done: */ static herr_t H5B2__update_child_flush_depends(H5B2_hdr_t *hdr, unsigned depth, - const H5B2_node_ptr_t *node_ptrs, unsigned start_idx, unsigned end_idx, + const H5B2_node_ptr_t *node_ptrs, unsigned start_idx, unsigned end_idx, void *old_parent, void *new_parent) { unsigned u; /* Local index variable */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC - + /* Sanity checks */ HDassert(hdr); HDassert(depth > 1); @@ -1929,7 +1929,7 @@ H5B2__destroy_flush_depend(H5AC_info_t *parent_entry, H5AC_info_t *child_entry) herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT - + /* Sanity check */ HDassert(parent_entry); HDassert(child_entry); diff --git a/src/H5B2internal.c b/src/H5B2internal.c index 92c802e..1fee0af 100644 --- a/src/H5B2internal.c +++ b/src/H5B2internal.c @@ -388,7 +388,7 @@ H5B2__insert_internal(H5B2_hdr_t *hdr, uint16_t depth, unsigned *parent_cache_in size_t split_nrec; /* Number of records to split node at */ /* Locate node pointer for child */ - if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, + if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx, &cmp) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") if(cmp == 0) @@ -444,7 +444,7 @@ H5B2__insert_internal(H5B2_hdr_t *hdr, uint16_t depth, unsigned *parent_cache_in /* Locate node pointer for child (after split/redistribute) */ /* Actually, this can be easily updated (for 2-node redistrib.) 
and shouldn't require re-searching */ - if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, + if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx, &cmp) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") if(cmp == 0) diff --git a/src/H5B2leaf.c b/src/H5B2leaf.c index 7ed0468..b542061 100644 --- a/src/H5B2leaf.c +++ b/src/H5B2leaf.c @@ -1027,7 +1027,7 @@ done: *------------------------------------------------------------------------- */ H5_ATTR_PURE herr_t -H5B2__assert_leaf(const H5B2_hdr_t H5_ATTR_NDEBUG_UNUSED *hdr, +H5B2__assert_leaf(const H5B2_hdr_t H5_ATTR_NDEBUG_UNUSED *hdr, const H5B2_leaf_t H5_ATTR_NDEBUG_UNUSED *leaf) { /* General sanity checking on node */ diff --git a/src/H5B2pkg.h b/src/H5B2pkg.h index 27229f1..338db5a 100644 --- a/src/H5B2pkg.h +++ b/src/H5B2pkg.h @@ -433,7 +433,7 @@ H5_DLL herr_t H5B2__remove_internal(H5B2_hdr_t *hdr, hbool_t *depth_decreased, H5_DLL herr_t H5B2__remove_leaf(H5B2_hdr_t *hdr, H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, void *parent, void *udata, H5B2_remove_t op, void *op_data); -H5_DLL herr_t H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr, +H5_DLL herr_t H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr, hbool_t *depth_decreased, void *swap_loc, void *swap_parent, uint16_t depth, H5AC_info_t *parent_cache_info, unsigned *parent_cache_info_flags_ptr, H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, hsize_t n, diff --git a/src/H5B2test.c b/src/H5B2test.c index 0f4a39c..7affd49 100644 --- a/src/H5B2test.c +++ b/src/H5B2test.c @@ -562,7 +562,7 @@ H5B2__get_node_info_test(H5B2_t *bt2, void *udata, H5B2_node_info_test_t *ninfo) } /* end if */ /* Locate node pointer for child */ - if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, + if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx, &cmp) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") @@ -616,7 +616,7 @@ H5B2__get_node_info_test(H5B2_t *bt2, void *udata, H5B2_node_info_test_t *ninfo) } /* end if */ /* Locate record */ - if(H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, + if(H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx, &cmp) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") diff --git a/src/H5C.c b/src/H5C.c index 2a90bd1..91e4158 100644 --- a/src/H5C.c +++ b/src/H5C.c @@ -171,7 +171,7 @@ static herr_t H5C__verify_len_eoa(H5F_t *f, const H5C_class_t * type, haddr_t addr, size_t *len, hbool_t actual); #if H5C_DO_SLIST_SANITY_CHECKS -static hbool_t H5C_entry_in_skip_list(H5C_t * cache_ptr, +static hbool_t H5C_entry_in_skip_list(H5C_t * cache_ptr, H5C_cache_entry_t *target_ptr); #endif /* H5C_DO_SLIST_SANITY_CHECKS */ @@ -722,10 +722,10 @@ H5C_free_tag_list_cb(void *_item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED * * Function: H5C_prep_for_file_close * - * Purpose: This function should be called just prior to the cache - * flushes at file close. There should be no protected + * Purpose: This function should be called just prior to the cache + * flushes at file close. There should be no protected * entries in the cache at this point. 
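/* ---------------------------------------------------------------------------
 * Illustrative sketch (not part of the patch): the re-search performed after
 * the split/redistribute in the H5B2__insert_internal hunk above is a
 * lower-bound binary search over the node's native records
 * (H5B2__locate_record). A generic version, with compare() following strcmp
 * conventions; all names are local to this sketch.
 * ------------------------------------------------------------------------- */
#include <stddef.h>

static void
locate_record(const void *key, const void *recs, size_t nrec, size_t rec_size,
              int (*compare)(const void *key, const void *rec),
              unsigned *idx, int *cmp)
{
    const char *base = (const char *)recs;
    size_t      lo   = 0;
    size_t      hi   = nrec;

    while (lo < hi) {   /* find the first record >= key */
        size_t mid = lo + (hi - lo) / 2;

        if (compare(key, base + mid * rec_size) > 0)
            lo = mid + 1;
        else
            hi = mid;
    }

    *idx = (unsigned)lo;   /* child / slot to descend into */
    *cmp = (lo < nrec) ? compare(key, base + lo * rec_size)
                       : 1;   /* past the last record */
    /* *cmp == 0 means the key is already present at *idx */
}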
- * + * * Return: Non-negative on success/Negative on failure * * Programmer: John Mainzer @@ -750,8 +750,8 @@ H5C_prep_for_file_close(H5F_t *f) HDassert(cache_ptr); HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - /* For now at least, it is possible to receive the - * close warning more than once -- the following + /* For now at least, it is possible to receive the + * close warning more than once -- the following * if statement handles this. */ if(cache_ptr->close_warning_received) @@ -767,8 +767,8 @@ H5C_prep_for_file_close(H5F_t *f) #ifdef H5_HAVE_PARALLEL if ( ( H5F_INTENT(f) & H5F_ACC_RDWR ) && - ( ! image_generated ) && - ( cache_ptr->aux_ptr != NULL ) && + ( ! image_generated ) && + ( cache_ptr->aux_ptr != NULL ) && ( f->shared->fs_persist ) ) { /* If persistent free space managers are enabled, flushing the * metadata cache may result in the deletion, insertion, and/or @@ -1011,31 +1011,31 @@ done: * Programmer: John Mainzer * 6/2/04 * - * Changes: Modified function to test for slist chamges in + * Changes: Modified function to test for slist chamges in * pre_serialize and serialize callbacks, and re-start * scans through the slist when such changes occur. * * This has been a potential problem for some time, - * and there has been code in this function to deal - * with elements of this issue. However the shift + * and there has been code in this function to deal + * with elements of this issue. However the shift * to the V3 cache in combination with the activities - * of some of the cache clients (in particular the + * of some of the cache clients (in particular the * free space manager and the fractal heap) have * made this re-work necessary. * * JRM -- 12/13/14 * - * Modified function to support rings. Basic idea is that + * Modified function to support rings. Basic idea is that * every entry in the cache is assigned to a ring. Entries - * in the outermost ring are flushed first, followed by - * those in the next outermost ring, and so on until the - * innermost ring is flushed. See header comment on - * H5C_ring_t in H5Cprivate.h for a more detailed + * in the outermost ring are flushed first, followed by + * those in the next outermost ring, and so on until the + * innermost ring is flushed. See header comment on + * H5C_ring_t in H5Cprivate.h for a more detailed * discussion. * * JRM -- 8/30/15 * - * Modified function to call the free space manager + * Modified function to call the free space manager * settling functions. * JRM -- 6/9/16 * @@ -1111,7 +1111,7 @@ H5C_flush_cache(H5F_t *f, unsigned flags) HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate failed") } /* end if */ else { - /* flush each ring, starting from the outermost ring and + /* flush each ring, starting from the outermost ring and * working inward. */ ring = H5C_RING_USER; @@ -2245,7 +2245,7 @@ H5C_protect(H5F_t * f, if(entry_ptr->prefetched) { /* This call removes the prefetched entry from the cache, - * and replaces it with an entry deserialized from the + * and replaces it with an entry deserialized from the * image of the prefetched entry. */ if(H5C__deserialize_prefetched_entry(f, cache_ptr, &entry_ptr, type, addr, udata) < 0) @@ -2314,7 +2314,7 @@ H5C_protect(H5F_t * f, if(cache_ptr->ignore_tags != TRUE) { haddr_t tag; /* Tag value */ - /* The entry is already in the cache, but make sure that the tag value + /* The entry is already in the cache, but make sure that the tag value * is still legal. 
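/* ---------------------------------------------------------------------------
 * Illustrative sketch (not part of the patch): the H5C_flush_cache hunk above
 * flushes ring by ring, outermost first, so that flush callbacks which dirty
 * inner-ring metadata (free space managers, superblock) run before those
 * rings are flushed. The enum mirrors the documented H5C_ring_t ordering;
 * flush_ring() is a hypothetical per-ring flush.
 * ------------------------------------------------------------------------- */
typedef enum {
    RING_SKETCH_UNDEFINED = 0,
    RING_SKETCH_USER,      /* outermost: ordinary user metadata   */
    RING_SKETCH_RDFSM,     /* raw data free space manager         */
    RING_SKETCH_MDFSM,     /* metadata free space manager         */
    RING_SKETCH_SBE,       /* superblock extension                */
    RING_SKETCH_SB,        /* innermost: superblock               */
    RING_SKETCH_NTYPES
} ring_sketch_t;

static int
flush_cache_by_rings(int (*flush_ring)(ring_sketch_t ring, unsigned flags),
                     unsigned flags)
{
    ring_sketch_t ring = RING_SKETCH_USER;

    while (ring < RING_SKETCH_NTYPES) {   /* outermost in, superblock last */
        if (flush_ring(ring, flags) < 0)
            return -1;
        ring++;
    }
    return 0;
}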
This will ensure that had the entry NOT been in the * cache, tagging was still set up correctly and it would have received * a legal tag value after getting loaded from disk. @@ -2322,7 +2322,7 @@ H5C_protect(H5F_t * f, /* Get the tag */ tag = H5CX_get_tag(); - + if(H5C_verify_tag(entry_ptr->type->id, tag) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "tag verification failed") } /* end if */ @@ -2338,9 +2338,9 @@ H5C_protect(H5F_t * f, hit = FALSE; - if(NULL == (thing = H5C_load_entry(f, + if(NULL == (thing = H5C_load_entry(f, #ifdef H5_HAVE_PARALLEL - coll_access, + coll_access, #endif /* H5_HAVE_PARALLEL */ type, addr, udata))) HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't load entry") @@ -2449,12 +2449,12 @@ H5C_protect(H5F_t * f, * ******************************************* * * Set the flush_last field - * of the newly loaded entry before inserting it into the - * index. Must do this, as the index tracked the number of - * entries with the flush_last field set, but assumes that + * of the newly loaded entry before inserting it into the + * index. Must do this, as the index tracked the number of + * entries with the flush_last field set, but assumes that * the field will not change after insertion into the index. * - * Note that this means that the H5C__FLUSH_LAST_FLAG flag + * Note that this means that the H5C__FLUSH_LAST_FLAG flag * is ignored if the entry is already in cache. */ entry_ptr->flush_me_last = flush_last; @@ -2486,7 +2486,7 @@ H5C_protect(H5F_t * f, HDassert(entry_ptr->ro_ref_count > 0); (entry_ptr->ro_ref_count)++; } /* end if */ - else + else HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Target already protected & not read only?!?") } /* end if */ else { @@ -2890,7 +2890,7 @@ done: * 3/22/06 * * Changes: Added extreme sanity checks on entry and exit. - JRM -- 4/26/14 + JRM -- 4/26/14 * *------------------------------------------------------------------------- */ @@ -3112,7 +3112,7 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags) if(entry_ptr->flush_dep_nparents > 0) if(H5C__mark_flush_dep_dirty(entry_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag") - } /* end if */ + } /* end if */ /* Check for newly clean entry */ else if(!was_clean && !entry_ptr->is_dirty) { /* If the entry's type has a 'notify' callback send a 'entry cleaned' @@ -3969,8 +3969,8 @@ H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted) HDassert( (cache_ptr->resize_ctl).min_clean_fraction <= (double)100.0f ); /* check to see if cache_ptr->resize_in_progress is TRUE. If it, this - * is a re-entrant call via a client callback called in the resize - * process. To avoid an infinite recursion, set reentrant_call to + * is a re-entrant call via a client callback called in the resize + * process. To avoid an infinite recursion, set reentrant_call to * TRUE, and goto done. */ if(cache_ptr->resize_in_progress) { @@ -5157,7 +5157,7 @@ H5C__flush_invalidate_cache(H5F_t *f, unsigned flags) while(entry_ptr) { /* Check ring */ HDassert(entry_ptr->ring == H5C_RING_SB); - + /* Advance to next entry in pinned entry list */ entry_ptr = entry_ptr->next; } /* end while */ @@ -5199,7 +5199,7 @@ done: * until either the cache is empty, or the number of pinned * entries stops decreasing on each pass. * - * If flush dependencies appear in the target ring, the + * If flush dependencies appear in the target ring, the * function makes repeated passes through the cache flushing * entries in flush dependency order. 
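The ring-ordered flushing described in these comments reduces to a simple outer loop: flush the outermost ring first and work inward, failing fast so that outer rings stay clean before inner ones are touched. A hedged sketch follows; the enum mirrors H5C_ring_t conceptually, and flush_one_ring() is a hypothetical stand-in for H5C__flush_ring():

typedef enum {
    RING_USER,      /* outermost: normal client entries */
    RING_RDFSM,     /* raw data free space manager entries */
    RING_MDFSM,     /* metadata free space manager entries */
    RING_SBE,       /* superblock extension entries */
    RING_SB,        /* innermost: the superblock itself */
    RING_NTYPES
} ring_t;

extern int flush_one_ring(ring_t ring);    /* hypothetical per-ring flush */

static int
flush_all_rings(void)
{
    ring_t ring;

    for (ring = RING_USER; ring < RING_NTYPES; ring++)
        if (flush_one_ring(ring) < 0)
            return -1;     /* outer rings flushed so far remain clean */
    return 0;
}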
* @@ -5252,8 +5252,8 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags) /* The flush procedure here is a bit strange. * * In the outer while loop we make at least one pass through the - * cache, and then repeat until either all the pinned entries in - * the ring unpin themselves, or until the number of pinned entries + * cache, and then repeat until either all the pinned entries in + * the ring unpin themselves, or until the number of pinned entries * in the ring stops declining. In this latter case, we scream and die. * * Since the fractal heap can dirty, resize, and/or move entries @@ -5309,9 +5309,9 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags) /* There is also the possibility that entries will be * dirtied, resized, moved, and/or removed from the cache - * as the result of calls to the flush callbacks. We use - * the slist_len_increase and slist_size_increase - * fields in struct H5C_t to track these changes for purposes + * as the result of calls to the flush callbacks. We use + * the slist_len_increase and slist_size_increase + * fields in struct H5C_t to track these changes for purposes * of sanity checking. * * To this end, we must zero these fields before we start @@ -5326,8 +5326,8 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags) * This flag is set to TRUE by H5C__flush_single_entry if the slist * is modified by a pre_serialize, serialize, or notify callback. * - * H5C_flush_invalidate_ring() uses this flag to detect any - * modifications to the slist that might corrupt the scan of + * H5C_flush_invalidate_ring() uses this flag to detect any + * modifications to the slist that might corrupt the scan of * the slist -- and restart the scan in this event. */ cache_ptr->slist_changed = FALSE; @@ -5357,14 +5357,14 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags) entry_ptr = next_entry_ptr; - /* It is possible that entries will be dirtied, resized, + /* It is possible that entries will be dirtied, resized, * flushed, or removed from the cache via the take ownership - * flag as the result of pre_serialize or serialize callbacks. - * + * flag as the result of pre_serialize or serialize callbacks. + * * This in turn can corrupt the scan through the slist. * - * We test for slist modifications in the pre_serialize - * and serialize callbacks, and restart the scan of the + * We test for slist modifications in the pre_serialize + * and serialize callbacks, and restart the scan of the * slist if we find them. However, it is best to do some extra * sanity checking just in case. */ @@ -5497,8 +5497,8 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags) if((!entry_ptr->flush_me_last || (entry_ptr->flush_me_last && cache_ptr->num_last_entries >= cache_ptr->slist_len)) && entry_ptr->flush_dep_nchildren == 0 && entry_ptr->ring == ring) { if(entry_ptr->is_protected) { - /* we have major problems -- but let's flush and - * destroy everything we can before we flag an + /* we have major problems -- but let's flush and + * destroy everything we can before we flag an * error. */ protected_entries++; @@ -5506,9 +5506,9 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags) HDassert(!(entry_ptr->is_dirty)); } /* end if */ else if(!(entry_ptr->is_pinned)) { - /* if *entry_ptr is dirty, it is possible - * that one or more other entries may be - * either removed from the cache, loaded + /* if *entry_ptr is dirty, it is possible + * that one or more other entries may be + * either removed from the cache, loaded * into the cache, or moved to a new location * in the file as a side effect of the flush. * * It's also possible that removing a clean * entry will remove the last child of a proxy * entry, allowing it to be removed also and * invalidating the next_entry_ptr. * - * If either of these happen, and one of the target - * or proxy entries happens to be the next entry in + * If either of these happen, and one of the target + * or proxy entries happens to be the next entry in * the hash bucket, we could find ourselves * either scanning a non-existent entry, scanning * through a different bucket, or skipping an entry. * - * None of these are good, so restart the - * scan at the head of the hash bucket + * None of these are good, so restart the + * scan at the head of the hash bucket * after the flush if we detect that the next_entry_ptr * becomes invalid. * @@ -5538,11 +5538,11 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags) HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed") /* Restart the index list scan if necessary. Must - * do this if the next entry is evicted, and also if + * do this if the next entry is evicted, and also if * one or more entries are inserted, loaded, or moved * as these operations can result in part of the scan * being skipped -- which can cause a spurious failure - * if this results in the size of the pinned entry list + * if this results in the size of the pinned entry list * failing to decline during the pass. */ if((NULL != next_entry_ptr && NULL == cache_ptr->entry_watched_for_removal) @@ -5585,7 +5585,7 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags) entry_ptr = entry_ptr->next; } /* end while */ - /* Check if the number of pinned entries in the ring is positive, and + /* Check if the number of pinned entries in the ring is positive, and * it is not declining. Scream and die if so. */ if(cur_ring_pel_len > 0 && cur_ring_pel_len >= old_ring_pel_len) { @@ -5628,16 +5628,16 @@ done: /*------------------------------------------------------------------------- * Function: H5C__flush_ring * - * Purpose: Flush the entries contained in the specified cache and + * Purpose: Flush the entries contained in the specified cache and * ring. All entries in rings outside the specified ring * must have been flushed on entry. * * If the cache contains protected entries in the specified - * ring, the function will fail, as protected entries cannot + * ring, the function will fail, as protected entries cannot * be flushed. However all unprotected entries in the target * ring should be flushed before the function returns failure. * - * If flush dependencies appear in the target ring, the + * If flush dependencies appear in the target ring, the * function makes repeated passes through the slist flushing * entries in flush dependency order. * @@ -5703,10 +5703,10 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) /* Set the cache_ptr->slist_changed to false. * - * This flag is set to TRUE by H5C__flush_single_entry if the + * This flag is set to TRUE by H5C__flush_single_entry if the * slist is modified by a pre_serialize, serialize, or notify callback. * H5C_flush_cache uses this flag to detect any modifications - * to the slist that might corrupt the scan of the slist -- and + * to the slist that might corrupt the scan of the slist -- and * restart the scan in this event. */ cache_ptr->slist_changed = FALSE; @@ -5726,8 +5726,8 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) * * To make things more entertaining, with the advent of the * fractal heap, the entry serialize callback can cause entries - * to be dirtied, resized, and/or moved. Also, the - * pre_serialize callback can result in an entry being + * to be dirtied, resized, and/or moved. Also, the + * pre_serialize callback can result in an entry being * removed from the cache via the take ownership flag. * * To deal with this, we first make note of the initial @@ -5778,24 +5778,24 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) HDassert(next_entry_ptr->is_dirty); HDassert(next_entry_ptr->in_slist); } /* end if */ - + entry_ptr = next_entry_ptr; /* With the advent of the fractal heap, the free space * manager, and the version 3 cache, it is possible - * that the pre-serialize or serialize callback will - * dirty, resize, or take ownership of other entries - * in the cache. + * that the pre-serialize or serialize callback will + * dirty, resize, or take ownership of other entries + * in the cache. * * To deal with this, I have inserted code to detect any * change in the skip list not directly under the control * of this function. If such modifications are detected, - * we must re-start the scan of the skip list to avoid + * we must re-start the scan of the skip list to avoid * the possibility that the target of the next_entry_ptr * may have been flushed or deleted from the cache. * * To verify that all such possibilities have been dealt - * with, we do a bit of extra sanity checking on + * with, we do a bit of extra sanity checking on * entry_ptr. */ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); @@ -5825,20 +5825,20 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) else next_entry_ptr = NULL; - if((!flush_marked_entries || entry_ptr->flush_marker) + if((!flush_marked_entries || entry_ptr->flush_marker) && (!entry_ptr->flush_me_last || - (entry_ptr->flush_me_last + (entry_ptr->flush_me_last && (cache_ptr->num_last_entries >= cache_ptr->slist_len - || (flush_marked_entries && entry_ptr->flush_marker)))) + || (flush_marked_entries && entry_ptr->flush_marker)))) && (entry_ptr->flush_dep_nchildren == 0 - || entry_ptr->flush_dep_ndirty_children == 0) + || entry_ptr->flush_dep_ndirty_children == 0) && entry_ptr->ring == ring) { HDassert(entry_ptr->flush_dep_nunser_children == 0); if(entry_ptr->is_protected) { - /* we probably have major problems -- but let's - * flush everything we can before we decide + /* we probably have major problems -- but let's + * flush everything we can before we decide * whether to flag an error. */ tried_to_flush_protected_entry = TRUE; @@ -5850,9 +5850,9 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags) if(cache_ptr->slist_changed) { /* The slist has been modified by something - * other than the simple removal of the + * other than the simple removal of the * flushed entry after the flush. - * + * * This has the potential to corrupt the * scan through the slist, so restart it.
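That restart logic reduces to a small, general pattern: clear a "changed" flag before the scan, let callbacks set it, and jump back to the head of the list whenever it trips. A compilable sketch with stand-in types, not the HDF5 skip list API:

typedef struct node_t  node_t;
typedef struct entry_t entry_t;
typedef struct cache_t { int slist_changed; void *slist; } cache_t;

extern node_t  *list_first(void *slist);
extern node_t  *list_next(node_t *node);
extern entry_t *list_item(node_t *node);
extern int      flush_entry(cache_t *cache, entry_t *entry);

static int
flush_slist_with_restarts(cache_t *cache)
{
    node_t *node;

    cache->slist_changed = 0;                /* cleared before the scan */
    node = list_first(cache->slist);
    while (node != NULL) {
        entry_t *entry = list_item(node);

        node = list_next(node);              /* advance before flushing */
        if (flush_entry(cache, entry) < 0)   /* callbacks may edit the list */
            return -1;
        if (cache->slist_changed) {          /* scan may now be corrupt... */
            node = list_first(cache->slist); /* ...so restart from the head */
            cache->slist_changed = 0;
        }
    }
    return 0;
}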
*/ @@ -5980,11 +5980,11 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) else write_entry = FALSE; - /* if we have received close warning, and we have been instructed to + /* if we have received close warning, and we have been instructed to * generate a metadata cache image, and we have actually constructed * the entry images, set suppress_image_entry_frees to TRUE. * - * Set suppress_image_entry_writes to TRUE if indicated by the + * Set suppress_image_entry_writes to TRUE if indicated by the * image_ctl flags. */ if(cache_ptr->close_warning_received && cache_ptr->image_ctl.generate_image @@ -6063,11 +6063,11 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) } /* end if ( ! (entry_ptr->image_up_to_date) ) */ } /* end if */ - /* Finally, write the image to disk. - * - * Note that if the H5AC__CLASS_SKIP_WRITES flag is set in the + /* Finally, write the image to disk. + * + * Note that if the H5AC__CLASS_SKIP_WRITES flag is set in the * in the entry's type, we silently skip the write. This - * flag should only be used in test code. + * flag should only be used in test code. */ if(write_entry) { HDassert(entry_ptr->is_dirty); @@ -6079,8 +6079,8 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) /* Write the image to disk unless the write is suppressed. * - * This happens if both suppress_image_entry_writes and - * entry_ptr->include_in_image are TRUE, or if the + * This happens if both suppress_image_entry_writes and + * entry_ptr->include_in_image are TRUE, or if the * H5AC__CLASS_SKIP_WRITES is set in the entry's type. This * flag should only be used in test code */ @@ -6113,7 +6113,7 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) #endif /* H5_HAVE_PARALLEL */ } /* end if */ - /* if the entry has a notify callback, notify it that we have + /* if the entry has a notify callback, notify it that we have * just flushed the entry. */ if(entry_ptr->type->notify && @@ -6125,7 +6125,7 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) * made if it was appropriate to make them. Similarly, the entry * has been written to disk if desired. * - * Thus it is now safe to update the cache data structures for the + * Thus it is now safe to update the cache data structures for the * flush. */ @@ -6179,7 +6179,7 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) * * 5) Remove it from the tag list for this object * - * Finally, if the destroy_entry flag is set, discard the + * Finally, if the destroy_entry flag is set, discard the * entry. */ H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL) @@ -6213,7 +6213,7 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) /* We are either doing a flush or a clear. * * A clear and a flush are the same from the point of - * view of the replacement policy and the slist. + * view of the replacement policy and the slist. * Hence no differentiation between them. * * JRM -- 7/7/07 @@ -6223,8 +6223,8 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush) - /* mark the entry as clean and update the index for - * entry clean. Also, call the clear callback + /* mark the entry as clean and update the index for + * entry clean. Also, call the clear callback * if defined. 
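Stripped of the macros, the "entry becomes clean" bookkeeping described here is an ordered sequence: leave the dirty-entry (skip) list, clear the dirty flag, then move the entry's size from the dirty to the clean index tally. A sketch with illustrative names; the optional clear callback mentioned above is omitted:

#include <stddef.h>

typedef struct entry_t {
    int    is_dirty;
    size_t size;
} entry_t;

typedef struct cache_t {
    size_t clean_index_size;
    size_t dirty_index_size;
} cache_t;

extern void remove_from_slist(cache_t *cache, entry_t *entry);

static void
mark_entry_clean(cache_t *cache, entry_t *entry)
{
    remove_from_slist(cache, entry);         /* leaves the dirty-entry list */
    entry->is_dirty = 0;
    cache->dirty_index_size -= entry->size;  /* index bookkeeping for the */
    cache->clean_index_size += entry->size;  /* newly clean entry */
}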
*/ entry_ptr->is_dirty = FALSE; @@ -6256,8 +6256,8 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) end before the entry_ptr gets freed */ entry_addr = entry_ptr->addr; - /* Internal cache data structures should now be up to date, and - * consistent with the status of the entry. + /* Internal cache data structures should now be up to date, and + * consistent with the status of the entry. * * Now discard the entry if appropriate. */ @@ -6266,18 +6266,18 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) HDassert(0 == entry_ptr->flush_dep_nparents); /* if both suppress_image_entry_frees and entry_ptr->include_in_image - * are true, simply set entry_ptr->image_ptr to NULL, as we have + * are true, simply set entry_ptr->image_ptr to NULL, as we have * another pointer to the buffer in an instance of H5C_image_entry_t * in cache_ptr->image_entries. * * Otherwise, free the buffer if it exists. */ - if(suppress_image_entry_frees && entry_ptr->include_in_image) + if(suppress_image_entry_frees && entry_ptr->include_in_image) entry_ptr->image_ptr = NULL; else if(entry_ptr->image_ptr != NULL) entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr); - /* If the entry is not a prefetched entry, verify that the flush + /* If the entry is not a prefetched entry, verify that the flush * dependency parents addresses array has been transferred. * * If the entry is prefetched, the free_isr routine will dispose of @@ -6288,8 +6288,8 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) HDassert(NULL == entry_ptr->fd_parent_addrs); } /* end if */ - /* Check whether we should free the space in the file that - * the entry occupies + /* Check whether we should free the space in the file that + * the entry occupies */ if(free_file_space) { hsize_t fsf_size; @@ -6326,14 +6326,14 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) /* Reset the pointer to the cache the entry is within. -QAK */ entry_ptr->cache_ptr = NULL; - /* increment entries_removed_counter and set - * last_entry_removed_ptr. As we are likely abuut to - * free the entry, recall that last_entry_removed_ptr + /* increment entries_removed_counter and set + * last_entry_removed_ptr. As we are likely abuut to + * free the entry, recall that last_entry_removed_ptr * must NEVER be dereferenced. * * Recall that these fields are maintained to allow functions - * that perform scans of lists of entries to detect the - * unexpected removal of entries (via expunge, eviction, + * that perform scans of lists of entries to detect the + * unexpected removal of entries (via expunge, eviction, * or take ownership at present), so that they can re-start * their scans if necessary. * @@ -6377,15 +6377,15 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) HDassert(take_ownership); /* client is taking ownership of the entry. 
- * set bad magic here too so the cache will choke + * set bad magic here too so the cache will choke * unless the entry is re-inserted properly */ entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC; } /* end else */ } /* if (destroy) */ - /* Check if we have to update the page buffer with cleared entries - * so it doesn't go out of date + /* Check if we have to update the page buffer with cleared entries + * so it doesn't go out of date */ if(update_page_buffer) { /* Sanity check */ @@ -6402,9 +6402,9 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "log_flush callback failed") done: - HDassert( ( ret_value != SUCCEED ) || ( destroy_entry ) || + HDassert( ( ret_value != SUCCEED ) || ( destroy_entry ) || ( ! entry_ptr->flush_in_progress ) ); - HDassert( ( ret_value != SUCCEED ) || ( destroy_entry ) || + HDassert( ( ret_value != SUCCEED ) || ( destroy_entry ) || ( take_ownership ) || ( ! entry_ptr->is_dirty ) ); FUNC_LEAVE_NOAPI(ret_value) @@ -6421,7 +6421,7 @@ done: * If exceed, adjust 'len' accordingly. * * Verify that 'len' should not exceed eoa when 'actual' is - * true i.e. 'len' is the actual length from get_load_size + * true i.e. 'len' is the actual length from get_load_size * callback with non-null image pointer. * If exceed, return error. * @@ -6442,7 +6442,7 @@ H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr, FUNC_ENTER_STATIC - /* if type == H5FD_MEM_GHEAP, H5F_block_read() forces + /* if type == H5FD_MEM_GHEAP, H5F_block_read() forces * type to H5FD_MEM_DRAW via its call to H5F__accum_read(). * Thus we do the same for purposes of computing the EOA * for sanity checks. @@ -6574,7 +6574,7 @@ H5C_load_entry(H5F_t * f, /* Get the # of read attempts */ max_tries = tries = H5F_GET_READ_ATTEMPTS(f); - /* + /* * This do/while loop performs the following till the metadata checksum * is correct or the file's number of allowed read attempts are reached. * --read the metadata @@ -6874,8 +6874,8 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted) HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size)); /* check to see if cache_ptr->msic_in_progress is TRUE. If it, this - * is a re-entrant call via a client callback called in the make - * space in cache process. To avoid an infinite recursion, set + * is a re-entrant call via a client callback called in the make + * space in cache process. To avoid an infinite recursion, set * reentrant_call to TRUE, and goto done. 
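The guard described here is a classic re-entrancy check: a client callback may call back into the same function, so the nested call detects the in-progress flag and bails out rather than recursing. Distilled into a stand-alone sketch; names are illustrative, and in HDF5 the flag lives in H5C_t rather than a static:

static int in_progress = 0;        /* lives in the cache struct in HDF5 */

int
make_space(void)
{
    int reentrant_call = 0;
    int ret_value = 0;

    if (in_progress) {             /* re-entered via a client callback */
        reentrant_call = 1;
        goto done;
    }
    in_progress = 1;

    /* ... evict and flush entries; callbacks may call back in here ... */

done:
    if (!reentrant_call)
        in_progress = 0;           /* only the outermost call clears it */
    return ret_value;
}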
*/ if(cache_ptr->msic_in_progress) { @@ -6947,9 +6947,9 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted) } #endif /* H5C_COLLECT_CACHE_STATS */ - /* reset entries_removed_counter and - * last_entry_removed_ptr prior to the call to - * H5C__flush_single_entry() so that we can spot + /* reset entries_removed_counter and + * last_entry_removed_ptr prior to the call to + * H5C__flush_single_entry() so that we can spot * unexpected removals of entries from the cache, * and set the restart_scan flag if proceeding * would be likely to cause us to scan an entry @@ -6966,7 +6966,7 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted) restart_scan = TRUE; - } else if ( (cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size + } else if ( (cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size #ifdef H5_HAVE_PARALLEL && !(entry_ptr->coll_access) #endif /* H5_HAVE_PARALLEL */ @@ -7124,7 +7124,7 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted) prev_ptr = entry_ptr->aux_prev; - if ( ( !(entry_ptr->prefetched_dirty) ) + if ( ( !(entry_ptr->prefetched_dirty) ) #ifdef H5_HAVE_PARALLEL && ( ! (entry_ptr->coll_access) ) #endif /* H5_HAVE_PARALLEL */ @@ -7174,7 +7174,7 @@ done: * * Changes: * - * Added code to verify that the LRU contains no pinned + * Added code to verify that the LRU contains no pinned * entries. JRM -- 4/25/14 * *------------------------------------------------------------------------- @@ -7254,7 +7254,7 @@ H5C_validate_lru_list(H5C_t * cache_ptr) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed") } - if ( ( entry_ptr->is_pinned ) || + if ( ( entry_ptr->is_pinned ) || ( entry_ptr->pinned_from_client ) || ( entry_ptr->pinned_from_cache ) ) { @@ -7290,7 +7290,7 @@ done: * * Function: H5C_validate_pinned_entry_list * - * Purpose: Debugging function that scans the pinned entry list for + * Purpose: Debugging function that scans the pinned entry list for * errors. * * If an error is detected, the function generates a @@ -7422,7 +7422,7 @@ done: * * Function: H5C_validate_protected_entry_list * - * Purpose: Debugging function that scans the protected entry list for + * Purpose: Debugging function that scans the protected entry list for * errors. * * If an error is detected, the function generates a @@ -7547,11 +7547,11 @@ done: * * Function: H5C_entry_in_skip_list * - * Purpose: Debugging function that scans skip list to see if it - * is in present. We need this, as it is possible for + * Purpose: Debugging function that scans skip list to see if it + * is in present. We need this, as it is possible for * an entry to be in the skip list twice. * - * Return: FALSE if the entry is not in the skip list, and TRUE + * Return: FALSE if the entry is not in the skip list, and TRUE * if it is. 
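The membership test sketched below mirrors what such a debugging scan has to do: walk every node and compare item pointers, since the same entry can legitimately appear in the list twice. Stand-in list API, not the HDF5 one:

typedef struct node_t node_t;

extern node_t *list_first(void *slist);
extern node_t *list_next(node_t *node);
extern void   *list_item(node_t *node);

static int
entry_in_list(void *slist, const void *target)
{
    node_t *node;

    for (node = list_first(slist); node != NULL; node = list_next(node))
        if (list_item(node) == target)
            return 1;     /* present -- possibly one of several duplicates */
    return 0;
}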
* * Programmer: John Mainzer, 11/1/14 @@ -7617,7 +7617,7 @@ H5C_entry_in_skip_list(H5C_t * cache_ptr, H5C_cache_entry_t *target_ptr) */ herr_t H5C__flush_marked_entries(H5F_t * f) -{ +{ herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE @@ -7639,7 +7639,7 @@ done: * Function: H5C_cork * * Purpose: To cork/uncork/get cork status of an object depending on "action": - * H5C__SET_CORK: + * H5C__SET_CORK: * To cork the object * Return error if the object is already corked * H5C__UNCORK: @@ -7648,7 +7648,7 @@ done: * H5C__GET_CORKED: * To retrieve the cork status of an object in * the parameter "corked" - * + * * Return: Success: Non-negative * Failure: Negative * @@ -7658,7 +7658,7 @@ done: *------------------------------------------------------------------------- */ herr_t -H5C_cork(H5C_t *cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked) +H5C_cork(H5C_t *cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked) { H5C_tag_info_t *tag_info; /* Points to a tag info struct */ herr_t ret_value = SUCCEED; @@ -7840,8 +7840,8 @@ done: /*------------------------------------------------------------------------- * Function: H5C__mark_flush_dep_serialized() * - * Purpose: Decrement the flush_dep_nunser_children fields of all the - * target entry's flush dependency parents in response to + * Purpose: Decrement the flush_dep_nunser_children fields of all the + * target entry's flush dependency parents in response to * the target entry becoming serialized. * * Return: Non-negative on success/Negative on failure @@ -7890,7 +7890,7 @@ done: * Function: H5C__mark_flush_dep_unserialized() * * Purpose: Increment the flush_dep_nunser_children fields of all the - * target entry's flush dependency parents in response to + * target entry's flush dependency parents in response to * the target entry becoming unserialized. * * Return: Non-negative on success/Negative on failure @@ -7916,7 +7916,7 @@ H5C__mark_flush_dep_unserialized(H5C_cache_entry_t * entry_ptr) /* Sanity check */ HDassert(entry_ptr->flush_dep_parent); HDassert(entry_ptr->flush_dep_parent[u]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - HDassert(entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children < + HDassert(entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children < entry_ptr->flush_dep_parent[u]->flush_dep_nchildren); /* increment parents number of usserialized children */ @@ -7976,11 +7976,11 @@ H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t * entry, /*------------------------------------------------------------------------- * Function: H5C__serialize_cache * - * Purpose: Serialize (i.e. construct an on disk image) for all entries - * in the metadata cache including clean entries. + * Purpose: Serialize (i.e. construct an on disk image) for all entries + * in the metadata cache including clean entries. * * Note that flush dependencies and "flush me last" flags - * must be observed in the serialization process. + * must be observed in the serialization process. * * Note also that entries may be loaded, flushed, evicted, * expunged, relocated, resized, or removed from the cache @@ -7988,17 +7988,17 @@ H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t * entry, * a regular flush. * * However, we are given that the cache will contain no protected - * entries on entry to this routine (although entries may be - * briefly protected and then unprotected during the serialize - * process). + * entries on entry to this routine (although entries may be + * briefly protected and then unprotected during the serialize + * process). 
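The serialization scheme described here and expanded below amounts to repeated passes over the index list: serialize any entry whose image is stale and which has no unserialized flush dependency children, and keep scanning until a full pass makes no progress. A simplified sketch; the field names mirror H5C_cache_entry_t, but the restart-on-eviction handling discussed below is omitted:

typedef struct entry_t {
    struct entry_t *il_next;                 /* index list link */
    int             image_up_to_date;
    int             flush_me_last;
    unsigned        flush_dep_nunser_children;
} entry_t;

typedef struct cache_t { entry_t *il_head; } cache_t;

extern int serialize_entry(cache_t *cache, entry_t *entry);

static int
serialize_ring_sketch(cache_t *cache)
{
    int done = 0;

    while (!done) {
        entry_t *e;

        done = 1;                            /* assume nothing left to do */
        for (e = cache->il_head; e != NULL; e = e->il_next)
            if (!e->image_up_to_date && !e->flush_me_last
                    && e->flush_dep_nunser_children == 0) {
                if (serialize_entry(cache, e) < 0)
                    return -1;               /* parents informed inside */
                done = 0;                    /* progress made; rescan */
            }
    }
    return 0;                                /* "flush me last" pass follows */
}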
* - * The objective of this routine is to serialize all entries and - * to force all entries into their actual locations on disk. + * The objective of this routine is to serialize all entries and + * to force all entries into their actual locations on disk. * - * The initial need for this routine is to settle all entries - * in the cache prior to construction of the metadata cache + * The initial need for this routine is to settle all entries + * in the cache prior to construction of the metadata cache * image so that the size of the cache image can be calculated. - * However, I gather that other uses for the routine are + * However, I gather that other uses for the routine are * under consideration. * * Return: Non-negative on success/Negative on failure or if there was @@ -8069,10 +8069,10 @@ H5C__serialize_cache(H5F_t *f) #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ #ifndef NDEBUG - /* if this is a debug build, set the serialization_count field of + /* if this is a debug build, set the serialization_count field of * each entry in the cache to zero before we start the serialization. * This allows us to detect the case in which any entry is serialized - * more than once (a performance issue), and more importantly, the + * more than once (a performance issue), and more importantly, the * case in which any flush dependency parent is serialized more than * once (a correctness issue). */ @@ -8088,10 +8088,10 @@ H5C__serialize_cache(H5F_t *f) } /* end block */ #endif /* NDEBUG */ - /* set cache_ptr->serialization_in_progress to TRUE, and back + /* set cache_ptr->serialization_in_progress to TRUE, and back * to FALSE at the end of the function. Must maintain this flag - * to support H5C_get_serialization_in_progress(), which is in - * turn required to support sanity checking in some cache + * to support H5C_get_serialization_in_progress(), which is in + * turn required to support sanity checking in some cache * clients. */ HDassert(!cache_ptr->serialization_in_progress); @@ -8169,16 +8169,16 @@ done: * * If the cache contains protected entries in the specified * ring, the function will fail, as protected entries cannot - * be serialized. However all unprotected entries in the - * target ring should be serialized before the function + * be serialized. However all unprotected entries in the + * target ring should be serialized before the function * returns failure. * * If flush dependencies appear in the target ring, the * function makes repeated passes through the index list * serializing entries in flush dependency order. * - * All entries outside the H5C_RING_SBE are marked for - * inclusion in the cache image. Entries in H5C_RING_SBE + * All entries outside the H5C_RING_SBE are marked for + * inclusion in the cache image. Entries in H5C_RING_SBE * and below are marked for exclusion from the image. * * Return: Non-negative on success/Negative on failure or if there was @@ -8213,76 +8213,76 @@ H5C__serialize_ring(H5F_t *f, H5C_ring_t ring) /* The objective here is to serialize all entries in the cache ring * in flush dependency order. * - * The basic algorithm is to scan the cache index list looking for + * The basic algorithm is to scan the cache index list looking for * unserialized entries that are either not in a flush dependency * relationship, or which have no unserialized children. Any such - * entry is serialized and its flush dependency parents (if any) are + * entry is serialized and its flush dependency parents (if any) are * informed -- allowing them to decrement their unserialized child counts. * * However, this algorithm is complicated by the ability - * of client serialization callbacks to perform operations - * on the cache which can result in the insertion, deletion, + * of client serialization callbacks to perform operations + * on the cache which can result in the insertion, deletion, * relocation, resize, dirty, flush, eviction, or removal (via the * take ownership flag) of entries. Changes in the flush dependency * structure are also possible. * - * On the other hand, the algorithm is simplified by the fact that - * we are serializing, not flushing. Thus, as long as all entries + * On the other hand, the algorithm is simplified by the fact that + * we are serializing, not flushing. Thus, as long as all entries * are serialized correctly, it doesn't matter if we have to go back * and serialize an entry a second time. * - * These possible actions result in the following modifications to + * These possible actions result in the following modifications to * the basic algorithm: * - * 1) In the event of an entry expunge, eviction or removal, we must - * restart the scan as it is possible that the next entry in our + * 1) In the event of an entry expunge, eviction or removal, we must + * restart the scan as it is possible that the next entry in our * scan is no longer in the cache. Were we to examine this entry, * we would be accessing deallocated memory. * - * 2) A resize, dirty, or insertion of an entry may result in - * the increment of a flush dependency parent's dirty and/or - * unserialized child count. In the context of serializing - * the cache, this is a non-issue, as even if we have already - * serialized the parent, it will be marked dirty and its image - * marked out of date if appropriate when the child is serialized. - * + * 2) A resize, dirty, or insertion of an entry may result in + * the increment of a flush dependency parent's dirty and/or + * unserialized child count. In the context of serializing + * the cache, this is a non-issue, as even if we have already + * serialized the parent, it will be marked dirty and its image + * marked out of date if appropriate when the child is serialized. + * * However, this is a major issue for a flush, as were this to happen * in a flush, it would violate the invariant that the flush dependency - * feature is intended to enforce. As the metadata cache has no - * control over the behavior of cache clients, it has no way of + * feature is intended to enforce. As the metadata cache has no + * control over the behavior of cache clients, it has no way of * preventing this behaviour. However, it should detect it if at all - * possible. + * possible. * * Do this by maintaining a count of the number of times each entry is - * serialized during a cache serialization. If any flush dependency + * serialized during a cache serialization. If any flush dependency * parent is serialized more than once, throw an assertion failure. * - * 3) An entry relocation will typically change the location of the - * entry in the index list. This shouldn't cause problems as we + * will scan the index list until we make a complete pass without + * finding anything to serialize -- making relocations of either * the current or next entries irrelevant. * - * Note that since a relocation may result in our skipping part of + * Note that since a relocation may result in our skipping part of * the index list, we must always do at least one more pass through * the index list after an entry relocation. * - * 4) Changes in the flush dependency structure are possible on + * 4) Changes in the flush dependency structure are possible on * entry insertion, load, expunge, evict, or remove. Destruction - * of a flush dependency has no effect, as it can only relax the + * of a flush dependency has no effect, as it can only relax the * flush dependencies. Creation of a flush dependency can create - * an unserialized child of a flush dependency parent where all + * an unserialized child of a flush dependency parent where all * flush dependency children were previously serialized. Should * this child dirty the flush dependency parent when it is serialized, * the parent will be re-serialized. * - * Per the discussion of 2) above, this is a non-issue for cache + * Per the discussion of 2) above, this is a non-issue for cache * serialization, and a major problem for cache flush. Using the - * same detection mechanism, throw an assertion failure if this - * condition appears. + * same detection mechanism, throw an assertion failure if this + * condition appears. * - * Observe that either eviction or removal of entries as a result of - * a serialization is not a problem as long as the flush dependency + * Observe that either eviction or removal of entries as a result of + * a serialization is not a problem as long as the flush dependency * tree does not change beyond the removal of a leaf. */ while(!done) { @@ -8300,7 +8300,7 @@ H5C__serialize_ring(H5F_t *f, H5C_ring_t ring) HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); /* Verify that either the entry is already serialized, or - * that it is assigned to either the target or an inner + * that it is assigned to either the target or an inner * ring. */ HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date)); @@ -8367,9 +8367,9 @@ H5C__serialize_ring(H5F_t *f, H5C_ring_t ring) cache_ptr->entries_relocated_counter = 0; /* At this point, all entries not marked "flush me last" and in - * the current ring or outside it should be serialized and have up - * to date images. Scan the index list again to serialize the - * "flush me last" entries (if they are in the current ring) and to + * the current ring or outside it should be serialized and have up + * to date images. Scan the index list again to serialize the + * "flush me last" entries (if they are in the current ring) and to * verify that all other entries have up to date images. */ entry_ptr = cache_ptr->il_head; @@ -8422,7 +8422,7 @@ done: /*------------------------------------------------------------------------- * Function: H5C__serialize_single_entry * - * Purpose: Serialize the cache entry pointed to by the entry_ptr + * Purpose: Serialize the cache entry pointed to by the entry_ptr * parameter. * * Return: Non-negative on success/Negative on failure @@ -8487,12 +8487,12 @@ done: * Purpose: Serialize an entry and generate its image. * * Note: This may cause the entry to be re-sized and/or moved in - * the cache.
* - * As we will not update the metadata cache's data structures - * until we we finish the write, we must touch up these - * data structures for size and location changes even if we - * are about to delete the entry from the cache (i.e. on a + * As we will not update the metadata cache's data structures + * until we we finish the write, we must touch up these + * data structures for size and location changes even if we + * are about to delete the entry from the cache (i.e. on a * flush destroy). * * Return: Non-negative on success/Negative on failure @@ -8528,7 +8528,7 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr) old_addr = entry_ptr->addr; /* Call client's pre-serialize callback, if there's one */ - if(entry_ptr->type->pre_serialize && + if(entry_ptr->type->pre_serialize && (entry_ptr->type->pre_serialize)(f, (void *)entry_ptr, entry_ptr->addr, entry_ptr->size, &new_addr, &new_len, &serialize_flags) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to pre-serialize entry") @@ -8596,7 +8596,7 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr) */ H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_len); - /* As we haven't updated the cache data structures for + /* As we haven't updated the cache data structures for * for the flush or flush destroy yet, the entry should * be in the slist. Thus update it for the size change. */ @@ -8608,8 +8608,8 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr) entry_ptr->size = new_len; } /* end if */ - /* If required, udate the entry and the cache data structures - * for a move + /* If required, udate the entry and the cache data structures + * for a move */ if(serialize_flags & H5C__SERIALIZE_MOVED_FLAG) { /* Update stats and entries relocated counter */ @@ -8641,7 +8641,7 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr) #endif /* H5C_DO_MEMORY_SANITY_CHECKS */ entry_ptr->image_up_to_date = TRUE; - /* Propagate the fact that the entry is serialized up the + /* Propagate the fact that the entry is serialized up the * flush dependency chain if appropriate. Since the image must * have been out of date for this function to have been called * (see assertion on entry), no need to check that -- only check @@ -8766,8 +8766,8 @@ H5C_remove_entry(void *_entry) if(entry == cache->entry_watched_for_removal) cache->entry_watched_for_removal = NULL; - /* Internal cache data structures should now be up to date, and - * consistent with the status of the entry. + /* Internal cache data structures should now be up to date, and + * consistent with the status of the entry. * * Now clean up internal cache fields if appropriate. */ diff --git a/src/H5CX.c b/src/H5CX.c index 160aa84..df335ea 100644 --- a/src/H5CX.c +++ b/src/H5CX.c @@ -706,7 +706,7 @@ H5CX__get_context(void) /* No associated value with current thread - create one */ #ifdef H5_HAVE_WIN_THREADS /* Win32 has to use LocalAlloc to match the LocalFree in DllMain */ - ctx = (H5CX_node_t **)LocalAlloc(LPTR, sizeof(H5CX_node_t *)); + ctx = (H5CX_node_t **)LocalAlloc(LPTR, sizeof(H5CX_node_t *)); #else /* Use HDmalloc here since this has to match the HDfree in the * destructor and we want to avoid the codestack there. 
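On POSIX builds, the per-thread context lookup described in the H5CX comment above follows the usual thread-specific-data pattern. A hedged sketch using plain pthreads; HDF5 wraps this in its H5TS layer, and Win32 builds pair LocalAlloc with the LocalFree in DllMain instead:

#include <pthread.h>
#include <stdlib.h>

static pthread_key_t ctx_key;  /* created once via pthread_key_create() */

static void **
get_context(void)
{
    void **ctx = (void **)pthread_getspecific(ctx_key);

    if (ctx == NULL) {
        /* No value associated with this thread yet -- allocate one.
         * The matching free() lives in the key's destructor, mirroring
         * the HDmalloc/HDfree pairing the comment above describes. */
        ctx = (void **)calloc(1, sizeof(void *));
        if (ctx != NULL)
            (void)pthread_setspecific(ctx_key, ctx);
    }
    return ctx;
}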
@@ -1784,7 +1784,7 @@ H5CX_get_mpi_file_flushing(void) /*------------------------------------------------------------------------- - * Function: H5CX_get_mpio_rank0_bcast + * Function: H5CX_get_mpio_rank0_bcast * * Purpose: Retrieves if the dataset meets read-with-rank0-and-bcast requirements for the current API call context. * @@ -3120,7 +3120,7 @@ H5CX_set_mpio_actual_chunk_opt(H5D_mpio_actual_chunk_opt_mode_t mpio_actual_chun /* Sanity checks */ HDassert(head && *head); - HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT || + HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT || (*head)->ctx.dxpl_id == H5P_DATASET_XFER_DEFAULT)); /* Cache the value for later, marking it to set in DXPL when context popped */ @@ -3152,7 +3152,7 @@ H5CX_set_mpio_actual_io_mode(H5D_mpio_actual_io_mode_t mpio_actual_io_mode) /* Sanity checks */ HDassert(head && *head); - HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT || + HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT || (*head)->ctx.dxpl_id == H5P_DATASET_XFER_DEFAULT)); /* Cache the value for later, marking it to set in DXPL when context popped */ @@ -3256,7 +3256,7 @@ H5CX_test_set_mpio_coll_chunk_link_hard(int mpio_coll_chunk_link_hard) /* Sanity checks */ HDassert(head && *head); - HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT || + HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT || (*head)->ctx.dxpl_id == H5P_DATASET_XFER_DEFAULT)); H5CX_TEST_SET_PROP(H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, mpio_coll_chunk_link_hard) @@ -3290,7 +3290,7 @@ H5CX_test_set_mpio_coll_chunk_multi_hard(int mpio_coll_chunk_multi_hard) /* Sanity checks */ HDassert(head && *head); - HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT || + HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT || (*head)->ctx.dxpl_id == H5P_DATASET_XFER_DEFAULT)); H5CX_TEST_SET_PROP(H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, mpio_coll_chunk_multi_hard) @@ -3324,7 +3324,7 @@ H5CX_test_set_mpio_coll_chunk_link_num_true(int mpio_coll_chunk_link_num_true) /* Sanity checks */ HDassert(head && *head); - HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT || + HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT || (*head)->ctx.dxpl_id == H5P_DATASET_XFER_DEFAULT)); H5CX_TEST_SET_PROP(H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, mpio_coll_chunk_link_num_true) @@ -3358,7 +3358,7 @@ H5CX_test_set_mpio_coll_chunk_link_num_false(int mpio_coll_chunk_link_num_false) /* Sanity checks */ HDassert(head && *head); - HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT || + HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT || (*head)->ctx.dxpl_id == H5P_DATASET_XFER_DEFAULT)); H5CX_TEST_SET_PROP(H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, mpio_coll_chunk_link_num_false) @@ -3392,7 +3392,7 @@ H5CX_test_set_mpio_coll_chunk_multi_ratio_coll(int mpio_coll_chunk_multi_ratio_c /* Sanity checks */ HDassert(head && *head); - HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT || + HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT || (*head)->ctx.dxpl_id == H5P_DATASET_XFER_DEFAULT)); H5CX_TEST_SET_PROP(H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, mpio_coll_chunk_multi_ratio_coll) @@ -3426,7 +3426,7 @@ H5CX_test_set_mpio_coll_chunk_multi_ratio_ind(int mpio_coll_chunk_multi_ratio_in /* Sanity checks */ HDassert(head && *head); - HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT || + HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT || (*head)->ctx.dxpl_id == H5P_DATASET_XFER_DEFAULT)); H5CX_TEST_SET_PROP(H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, mpio_coll_chunk_multi_ratio_ind) @@ -3460,7 +3460,7 @@ H5CX_test_set_mpio_coll_rank0_bcast(hbool_t mpio_coll_rank0_bcast) /* Sanity checks */ 
HDassert(head && *head); - HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT || + HDassert(!((*head)->ctx.dxpl_id == H5P_DEFAULT || (*head)->ctx.dxpl_id == H5P_DATASET_XFER_DEFAULT)); H5CX_TEST_SET_PROP(H5D_XFER_COLL_RANK0_BCAST_NAME, mpio_coll_rank0_bcast) diff --git a/src/H5Cdbg.c b/src/H5Cdbg.c index cb1d0e2..d5599f2 100644 --- a/src/H5Cdbg.c +++ b/src/H5Cdbg.c @@ -185,7 +185,7 @@ done: /*------------------------------------------------------------------------- * Function: H5C_dump_cache_LRU * - * Purpose: Print a summary of the contents of the metadata cache + * Purpose: Print a summary of the contents of the metadata cache * LRU for debugging purposes. * * Return: Non-negative on success/Negative on failure @@ -209,9 +209,9 @@ H5C_dump_cache_LRU(H5C_t *cache_ptr, const char *cache_name) HDassert(cache_name != NULL ); HDfprintf(stdout, "\n\nDump of metadata cache LRU \"%s\"\n", cache_name); - HDfprintf(stdout, "LRU len = %d, LRU size = %d\n", + HDfprintf(stdout, "LRU len = %d, LRU size = %d\n", cache_ptr->LRU_list_len, (int)(cache_ptr->LRU_list_size)); - HDfprintf(stdout, "index_size = %d, max_cache_size = %d, delta = %d\n\n", + HDfprintf(stdout, "index_size = %d, max_cache_size = %d, delta = %d\n\n", (int)(cache_ptr->index_size), (int)(cache_ptr->max_cache_size), (int)(cache_ptr->max_cache_size) - (int)(cache_ptr->index_size)); @@ -238,12 +238,12 @@ H5C_dump_cache_LRU(H5C_t *cache_ptr, const char *cache_name) if(NULL == entry_ptr->tag_info) HDfprintf(stdout, " %16s ", "N/A"); else - HDfprintf(stdout, " 0x%16llx ", + HDfprintf(stdout, " 0x%16llx ", (long long)(entry_ptr->tag_info->tag)); HDfprintf(stdout, " %5lld ", (long long)(entry_ptr->size)); HDfprintf(stdout, " %d ", (int)(entry_ptr->ring)); - HDfprintf(stdout, " %2d %-32s ", (int)(entry_ptr->type->id), + HDfprintf(stdout, " %2d %-32s ", (int)(entry_ptr->type->id), (entry_ptr->type->name)); HDfprintf(stdout, " %d", (int)(entry_ptr->is_dirty)); HDfprintf(stdout, "\n"); @@ -262,8 +262,8 @@ H5C_dump_cache_LRU(H5C_t *cache_ptr, const char *cache_name) /*------------------------------------------------------------------------- * Function: H5C_dump_cache_skip_list * - * Purpose: Debugging routine that prints a summary of the contents of - * the skip list used by the metadata cache to + * Purpose: Debugging routine that prints a summary of the contents of + * the skip list used by the metadata cache to * maintain an address-sorted list of dirty entries. * * Return: Non-negative on success/Negative on failure @@ -343,9 +343,9 @@ H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn) /*------------------------------------------------------------------------- * Function: H5C_dump_coll_write_list * - * Purpose: Debugging routine that prints a summary of the contents of - * the collective write skip list used by the metadata cache - * in the parallel case to maintain a list of entries to write + * Purpose: Debugging routine that prints a summary of the contents of + * the collective write skip list used by the metadata cache + * in the parallel case to maintain a list of entries to write * collectively at a sync point.
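All of these dump routines share one shape: print a header with the list length, then one summary line per node. A generic sketch with stand-in types and list API, not the H5SL interface:

#include <stdio.h>

typedef struct entry_t {
    unsigned long long addr;
    long long          size;
    int                is_dirty;
    const char        *type_name;
} entry_t;

typedef struct node_t node_t;

extern int      list_count(void *list);
extern node_t  *list_first(void *list);
extern node_t  *list_next(node_t *node);
extern entry_t *list_item(node_t *node);

static void
dump_list(void *list, const char *label)
{
    node_t *node;
    int     i = 0;

    fprintf(stdout, "\n\nDumping %s (%d entries):\n", label, list_count(list));
    for (node = list_first(list); node != NULL; node = list_next(node)) {
        entry_t *e = list_item(node);

        fprintf(stdout, "%d: addr = 0x%llx, len = %lld, type = %s, dirty = %d\n",
                i++, e->addr, e->size, e->type_name, e->is_dirty);
    }
}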
* * Return: Non-negative on success/Negative on failure @@ -381,7 +381,7 @@ H5C_dump_coll_write_list(H5C_t * cache_ptr, char * calling_fcn) list_len = (int)H5SL_count(cache_ptr->coll_write_list); - HDfprintf(stdout, "\n\nDumping MDC coll write list from %d:%s.\n", + HDfprintf(stdout, "\n\nDumping MDC coll write list from %d:%s.\n", aux_ptr->mpi_rank, calling_fcn); HDfprintf(stdout, " slist len = %u.\n", cache_ptr->slist_len); @@ -773,7 +773,7 @@ H5C_stats(H5C_t * cache_ptr, (((double)(cache_ptr->total_dirty_pf_entries_skipped_in_msic)) / ((double)(cache_ptr->calls_to_msic))); - HDfprintf(stdout, + HDfprintf(stdout, "%s MSIC: Average/max dirty pf entries skipped = %lf / %ld\n", cache_ptr->prefix, average_dirty_pf_entries_skipped_per_call_to_msic, @@ -798,9 +798,9 @@ H5C_stats(H5C_t * cache_ptr, (long long)(cache_ptr->total_entries_scanned_in_msic - cache_ptr->entries_scanned_to_make_space)); - HDfprintf(stdout, + HDfprintf(stdout, "%s slist/LRU/index scan restarts = %lld / %lld / %lld.\n", - cache_ptr->prefix, + cache_ptr->prefix, (long long)(cache_ptr->slist_scan_restarts), (long long)(cache_ptr->LRU_scan_restarts), (long long)(cache_ptr->index_scan_restarts)); @@ -827,7 +827,7 @@ H5C_stats(H5C_t * cache_ptr, (long long)(cache_ptr->evictions[H5AC_PREFETCHED_ENTRY_ID])); if(cache_ptr->prefetches > 0) - prefetch_use_rate = + prefetch_use_rate = (double)100.0f * ((double)(cache_ptr->prefetch_hits)) / ((double)(cache_ptr->prefetches)); else @@ -886,7 +886,7 @@ H5C_stats(H5C_t * cache_ptr, (int)(cache_ptr->max_read_protects[i])); HDfprintf(stdout, - "%s clears / flushes = %ld / %ld\n", + "%s clears / flushes = %ld / %ld\n", cache_ptr->prefix, (long)(cache_ptr->clears[i]), (long)(cache_ptr->flushes[i])); @@ -1158,21 +1158,21 @@ H5C__dump_entry(H5C_t *cache_ptr, const H5C_cache_entry_t *entry_ptr, /*------------------------------------------------------------------------- * Function: H5C_flush_dependency_exists() * - * Purpose: Test to see if a flush dependency relationship exists - * between the supplied parent and child. Both parties + * Purpose: Test to see if a flush dependency relationship exists + * between the supplied parent and child. Both parties * are indicated by addresses so as to avoid the necessity - * of protect / unprotect calls prior to this call. + * of protect / unprotect calls prior to this call. * - * If either the parent or the child is not in the metadata + * If either the parent or the child is not in the metadata * cache, the function sets *fd_exists_ptr to FALSE. * - * If both are in the cache, the childs list of parents is + * If both are in the cache, the childs list of parents is * searched for the proposed parent. If the proposed parent * is found in the childs parent list, the function sets - * *fd_exists_ptr to TRUE. In all other non-error cases, + * *fd_exists_ptr to TRUE. In all other non-error cases, * the function sets *fd_exists_ptr FALSE. * - * Return: SUCCEED on success/FAIL on failure. Note that + * Return: SUCCEED on success/FAIL on failure. Note that * *fd_exists_ptr is undefined on failure. * * Programmer: John Mainzer @@ -1355,30 +1355,30 @@ done: * * Function: H5C_get_entry_ptr_from_addr() * - * Purpose: Debugging function that attempts to look up an entry in the - * cache by its file address, and if found, returns a pointer - * to the entry in *entry_ptr_ptr. 
If the entry is not in the + * cache, *entry_ptr_ptr is set to NULL. * - * WARNING: This call should be used only in debugging - * routines, and it should be avoided when + * WARNING: This call should be used only in debugging + * routines, and it should be avoided when * possible. * - * Further, if we ever multi-thread the cache, - * this routine will have to be either discarded + * Further, if we ever multi-thread the cache, + * this routine will have to be either discarded * or heavily re-worked. * - * Finally, keep in mind that the entry whose - * pointer is obtained in this fashion may not - * be in a stable state. + * Finally, keep in mind that the entry whose + * pointer is obtained in this fashion may not + * be in a stable state. * * Note that this function is only defined if NDEBUG * is not defined. * - * As heavy use of this function is almost certainly a - * bad idea, the metadata cache tracks the number of - * successful calls to this function, and (if - * H5C_DO_SANITY_CHECKS is defined) displays any + * As heavy use of this function is almost certainly a + * bad idea, the metadata cache tracks the number of + * successful calls to this function, and (if + * H5C_DO_SANITY_CHECKS is defined) displays any * non-zero count on cache shutdown. * * Return: FAIL if error is detected, SUCCEED otherwise. @@ -1425,7 +1425,7 @@ done: /*------------------------------------------------------------------------- * Function: H5C_get_serialization_in_progress * - * Purpose: Return the current value of + * Purpose: Return the current value of * cache_ptr->serialization_in_progress. * * Return: Current value of cache_ptr->serialization_in_progress. @@ -1454,7 +1454,7 @@ H5C_get_serialization_in_progress(const H5C_t *cache_ptr) * * Function: H5C_cache_is_clean() * - * Purpose: Debugging function that verifies that all rings in the + * Purpose: Debugging function that verifies that all rings in the * metadata cache are clean from the outermost ring, inwards * to the inner ring specified. * @@ -1499,16 +1499,16 @@ done: * * Function: H5C_verify_entry_type() * - * Purpose: Debugging function that attempts to look up an entry in the + * Purpose: Debugging function that attempts to look up an entry in the * cache by its file address, and if found, tests to see if its * type field contains the expected value. * * If the specified entry is in cache, *in_cache_ptr is set - * to TRUE, and *type_ok_ptr is set to TRUE or FALSE depending - * on whether the entry's type field matches the expected_type + * to TRUE, and *type_ok_ptr is set to TRUE or FALSE depending + * on whether the entry's type field matches the expected_type * parameter. * - * If the target entry is not in cache, *in_cache_ptr is + * If the target entry is not in cache, *in_cache_ptr is * set to FALSE, and *type_ok_ptr is undefined.
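The contract spelled out above, setting *in_cache_ptr on every call while leaving *type_ok_ptr undefined on a miss, fits in a few lines. A sketch with an illustrative hash-index lookup standing in for the cache's index search:

typedef struct entry_t { const void *type; } entry_t;

extern entry_t *index_find(void *cache, unsigned long long addr);

static void
check_entry_type(void *cache, unsigned long long addr,
                 const void *expected_type, int *in_cache_ptr, int *type_ok_ptr)
{
    entry_t *e = index_find(cache, addr);   /* hash-table lookup stand-in */

    if (e == NULL)
        *in_cache_ptr = 0;                  /* *type_ok_ptr stays undefined */
    else {
        *in_cache_ptr = 1;
        *type_ok_ptr  = (e->type == expected_type);
    }
}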
* * Note that this function is only defined if NDEBUG diff --git a/src/H5Cepoch.c b/src/H5Cepoch.c index 8bcab9f..6451019 100644 --- a/src/H5Cepoch.c +++ b/src/H5Cepoch.c @@ -58,7 +58,7 @@ static herr_t H5C__epoch_marker_get_initial_load_size(void *udata_ptr, size_t *image_len_ptr); static herr_t H5C__epoch_marker_get_final_load_size(const void *image_ptr, size_t image_len_ptr, void *udata_ptr, size_t *actual_len); -static htri_t H5C__epoch_marker_verify_chksum(const void *image_ptr, +static htri_t H5C__epoch_marker_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr); static void * H5C__epoch_marker_deserialize(const void * image_ptr, size_t len, void * udata, hbool_t * dirty_ptr); @@ -71,7 +71,7 @@ static herr_t H5C__epoch_marker_serialize(const H5F_t *f, void * image_ptr, size_t len, void * thing); static herr_t H5C__epoch_marker_notify(H5C_notify_action_t action, void *thing); static herr_t H5C__epoch_marker_free_icr(void * thing); -static herr_t H5C__epoch_marker_fsf_size(const void H5_ATTR_UNUSED * thing, +static herr_t H5C__epoch_marker_fsf_size(const void H5_ATTR_UNUSED * thing, hsize_t H5_ATTR_UNUSED * fsf_size_ptr); @@ -143,8 +143,8 @@ H5C__epoch_marker_get_final_load_size(const void H5_ATTR_UNUSED *image_ptr, } /* end H5C__epoch_marker_final_get_load_size() */ -static htri_t -H5C__epoch_marker_verify_chksum(const void H5_ATTR_UNUSED *image_ptr, size_t H5_ATTR_UNUSED len, +static htri_t +H5C__epoch_marker_verify_chksum(const void H5_ATTR_UNUSED *image_ptr, size_t H5_ATTR_UNUSED len, void H5_ATTR_UNUSED *udata_ptr) { FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */ @@ -228,7 +228,7 @@ H5C__epoch_marker_free_icr(void H5_ATTR_UNUSED * thing) } /* end H5C__epoch_marker_free_icr() */ -static herr_t +static herr_t H5C__epoch_marker_fsf_size(const void H5_ATTR_UNUSED * thing, hsize_t H5_ATTR_UNUSED *fsf_size_ptr) { FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */ diff --git a/src/H5Cimage.c b/src/H5Cimage.c index db44c7a..ee286d9 100644 --- a/src/H5Cimage.c +++ b/src/H5Cimage.c @@ -102,11 +102,11 @@ static herr_t H5C__decode_cache_image_header(const H5F_t *f, static herr_t H5C__decode_cache_image_entry(const H5F_t *f, const H5C_t *cache_ptr, const uint8_t **buf, unsigned entry_num); #endif /* NDEBUG */ /* only used in assertions */ -static herr_t H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, +static herr_t H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_entry_ptr, H5C_cache_entry_t **fd_children); static herr_t H5C__encode_cache_image_header(const H5F_t *f, const H5C_t *cache_ptr, uint8_t **buf); -static herr_t H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, +static herr_t H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, uint8_t **buf, unsigned entry_num); static herr_t H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr); static void H5C__prep_for_file_close__compute_fd_heights_real( @@ -146,8 +146,8 @@ H5FL_DEFINE(H5C_cache_entry_t); * * Function: H5C_cache_image_pending() * - * Purpose: Tests to see if the load of a metadata cache image - * load is pending (i.e. will be executed on the next + * Purpose: Tests to see if the load of a metadata cache image + * load is pending (i.e. 
will be executed on the next * protect or insert) * * Returns TRUE if a cache image load is pending, and FALSE @@ -179,16 +179,16 @@ H5C_cache_image_pending(const H5C_t *cache_ptr) /*------------------------------------------------------------------------- * Function: H5C_cache_image_status() * - * Purpose: Examine the metadata cache associated with the supplied - * instance of H5F_t to determine whether the load of a - * cache image has either been queued or executed, and if + * Purpose: Examine the metadata cache associated with the supplied + * instance of H5F_t to determine whether the load of a + * cache image has either been queued or executed, and if * construction of a cache image has been requested. * * This done, it set *load_ci_ptr to TRUE if a cache image * has either been loaded or a load has been requested, and * to FALSE otherwise. * - * Similarly, set *write_ci_ptr to TRUE if construction of + * Similarly, set *write_ci_ptr to TRUE if construction of * a cache image has been requested, and to FALSE otherwise. * * Return: SUCCEED on success, and FAIL on failure. @@ -213,7 +213,7 @@ H5C_cache_image_status(H5F_t * f, hbool_t *load_ci_ptr, hbool_t *write_ci_ptr) HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(load_ci_ptr); HDassert(write_ci_ptr); - + *load_ci_ptr = cache_ptr->load_image || cache_ptr->image_loaded; *write_ci_ptr = cache_ptr->image_ctl.generate_image; @@ -224,7 +224,7 @@ H5C_cache_image_status(H5F_t * f, hbool_t *load_ci_ptr, hbool_t *write_ci_ptr) /*------------------------------------------------------------------------- * Function: H5C__construct_cache_image_buffer() * - * Purpose: Allocate a buffer of size cache_ptr->image_len, and + * Purpose: Allocate a buffer of size cache_ptr->image_len, and * load it with an image of the metadata cache image block. * * Note that by the time this function is called, the cache @@ -330,7 +330,7 @@ H5C__construct_cache_image_buffer(H5F_t * f, H5C_t *cache_ptr) HDassert((cache_ptr->image_entries)[u].type_id == (fake_cache_ptr->image_entries)[u].type_id); HDassert((cache_ptr->image_entries)[u].lru_rank == (fake_cache_ptr->image_entries)[u].lru_rank); HDassert((cache_ptr->image_entries)[u].is_dirty == (fake_cache_ptr->image_entries)[u].is_dirty); - /* don't check image_fd_height as it is not stored in + /* don't check image_fd_height as it is not stored in * the metadata cache image block. */ HDassert((cache_ptr->image_entries)[u].fd_child_count == (fake_cache_ptr->image_entries)[u].fd_child_count); @@ -346,7 +346,7 @@ H5C__construct_cache_image_buffer(H5F_t * f, H5C_t *cache_ptr) (fake_cache_ptr->image_entries)[u].fd_parent_addrs = (haddr_t *)H5MM_xfree((fake_cache_ptr->image_entries)[u].fd_parent_addrs); (fake_cache_ptr->image_entries)[u].fd_parent_count = 0; } /* end if */ - else + else HDassert((fake_cache_ptr->image_entries)[u].fd_parent_count == 0); HDassert((cache_ptr->image_entries)[u].image_ptr); @@ -431,28 +431,28 @@ done: * Function: H5C__deserialize_prefetched_entry() * * Purpose: Deserialize the supplied prefetched entry entry, and return - * a pointer to the deserialized entry in *entry_ptr_ptr. + * a pointer to the deserialized entry in *entry_ptr_ptr. * If successful, remove the prefetched entry from the cache, * and free it. Insert the deserialized entry into the cache. 
* - * Note that the on disk image of the entry is not freed -- + * Note that the on disk image of the entry is not freed -- * a pointer to it is stored in the deserialized entries' * image_ptr field, and its image_up_to_date field is set to * TRUE unless the entry is dirtied by the deserialize call. * * If the prefetched entry is a flush dependency child, - * destroy that flush dependency prior to calling the + * destroy that flush dependency prior to calling the * deserialize callback. If appropriate, the flush dependency * relationship will be recreated by the cache client. * * If the prefetched entry is a flush dependency parent, - * destroy the flush dependency relationship with all its + * destroy the flush dependency relationship with all its * children. As all these children must be prefetched entries, - * recreate these flush dependency relationships with + * recreate these flush dependency relationships with * deserialized entry after it is inserted in the cache. * - * Since deserializing a prefetched entry is semantically - * equivalent to a load, issue an entry loaded nofification + * Since deserializing a prefetched entry is semantically + * equivalent to a load, issue an entry loaded nofification * if the notify callback is defined. * * Return: SUCCEED on success, and FAIL on failure. @@ -468,22 +468,22 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t **entry_ptr_ptr, const H5C_class_t *type, haddr_t addr, void *udata) { - hbool_t dirty = FALSE; /* Flag indicating whether thing was - * dirtied during deserialize + hbool_t dirty = FALSE; /* Flag indicating whether thing was + * dirtied during deserialize */ size_t len; /* Size of image in file */ void * thing = NULL; /* Pointer to thing loaded */ H5C_cache_entry_t * pf_entry_ptr; /* pointer to the prefetched entry */ /* supplied in *entry_ptr_ptr. */ - H5C_cache_entry_t * ds_entry_ptr; /* Alias for thing loaded, as cache - * entry + H5C_cache_entry_t * ds_entry_ptr; /* Alias for thing loaded, as cache + * entry */ H5C_cache_entry_t** fd_children = NULL; /* Pointer to a dynamically */ /* allocated array of pointers to */ /* the flush dependency children of */ /* the prefetched entry, or NULL if */ /* that array does not exist. */ - unsigned flush_flags = (H5C__FLUSH_INVALIDATE_FLAG | + unsigned flush_flags = (H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG); int i; herr_t ret_value = SUCCEED; /* Return value */ @@ -513,8 +513,8 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, /* verify absence of prohibited or unsupported type flag combinations */ HDassert(!(type->flags & H5C__CLASS_SKIP_READS)); - - /* Can't see how skip reads could be usefully combined with + + /* Can't see how skip reads could be usefully combined with * either the speculative read flag. Hence disallow. 
*/ HDassert(!((type->flags & H5C__CLASS_SKIP_READS) && @@ -535,7 +535,7 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, HDassert(pf_entry_ptr->flush_dep_parent[i]->flush_dep_nchildren > 0); HDassert(pf_entry_ptr->fd_parent_addrs); HDassert(pf_entry_ptr->flush_dep_parent[i]->addr == pf_entry_ptr->fd_parent_addrs[i]); - + if(H5C_destroy_flush_dependency(pf_entry_ptr->flush_dep_parent[i], pf_entry_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "can't destroy pf entry parent flush dependency") @@ -543,11 +543,11 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, } /* end for */ HDassert(pf_entry_ptr->flush_dep_nparents == 0); - /* If *pf_entry_ptr is a flush dependency parent, destroy its flush - * dependency relationships with all its children (which must be + /* If *pf_entry_ptr is a flush dependency parent, destroy its flush + * dependency relationships with all its children (which must be * prefetched entries as well). * - * These flush dependency relationships will have to be restored + * These flush dependency relationships will have to be restored * after the deserialized entry is inserted into the cache in order * to transfer these relationships to the new entry. Hence save the * pointers to the flush dependency children of *pf_enty_ptr for later @@ -561,16 +561,16 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "can't destroy pf entry child flush dependency(s).") } /* end if */ - /* Since the size of the on disk image is known exactly, there is - * no need for either a call to the get_initial_load_size() callback, + /* Since the size of the on disk image is known exactly, there is + * no need for either a call to the get_initial_load_size() callback, * or retries if the H5C__CLASS_SPECULATIVE_LOAD_FLAG flag is set. * Similarly, there is no need to clamp possible reads beyond * EOF. */ len = pf_entry_ptr->size; - /* Deserialize the prefetched on-disk image of the entry into the - * native memory form + /* Deserialize the prefetched on-disk image of the entry into the + * native memory form */ if(NULL == (thing = type->deserialize(pf_entry_ptr->image_ptr, len, udata, &dirty))) HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "Can't deserialize image") @@ -587,14 +587,14 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, * * HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) ); * - * note that type ids 5 & 6 are associated with object headers in the + * note that type ids 5 & 6 are associated with object headers in the * metadata cache. * * When we get to using H5C for other purposes, we may wish to * tighten up the assert so that the loophole only applies to the * metadata cache. * - * Note that at present, dirty can't be set to true with prefetched + * Note that at present, dirty can't be set to true with prefetched * entries. However this may change, so include this functionality * against that posibility. * @@ -682,8 +682,8 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, /* We have successfully deserialized the prefetched entry. * * Before we return a pointer to the deserialized entry, we must remove - * the prefetched entry from the cache, discard it, and replace it with - * the deserialized entry. Note that we do not free the prefetched + * the prefetched entry from the cache, discard it, and replace it with + * the deserialized entry. 
Note that we do not free the prefetched * entries image, as that has been transferred to the deserialized * entry. * @@ -695,11 +695,11 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, * 1) Set pf_entry_ptr->image_ptr to NULL. Since we have already * transferred the buffer containing the image to *ds_entry_ptr, * this is not a memory leak. - * + * * 2) Call H5C__flush_single_entry() with the H5C__FLUSH_INVALIDATE_FLAG * and H5C__FLUSH_CLEAR_ONLY_FLAG flags set. */ - pf_entry_ptr->image_ptr = NULL; + pf_entry_ptr->image_ptr = NULL; if(pf_entry_ptr->is_dirty) { HDassert(pf_entry_ptr->in_slist); flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG; @@ -723,7 +723,7 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, ds_entry_ptr, FAIL) - /* Deserializing a prefetched entry is the conceptual equivalent of + /* Deserializing a prefetched entry is the conceptual equivalent of * loading it from file. If the deserialized entry has a notify callback, * send an "after load" notice now that the deserialized entry is fully * integrated into the cache. @@ -732,9 +732,9 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, (ds_entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_LOAD, ds_entry_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry loaded into cache") - /* Restore flush dependencies with the flush dependency children of - * of the prefetched entry. Note that we must protect *ds_entry_ptr - * before the call to avoid triggering sanity check failures, and + /* Restore flush dependencies with the flush dependency children of + * of the prefetched entry. Note that we must protect *ds_entry_ptr + * before the call to avoid triggering sanity check failures, and * then unprotect it afterwards. */ i = 0; @@ -798,8 +798,8 @@ done: /*------------------------------------------------------------------------- * Function: H5C__free_image_entries_array * - * Purpose: If the image entries array exists, free the image - * associated with each entry, and then free the image + * Purpose: If the image entries array exists, free the image + * associated with each entry, and then free the image * entries array proper. * * Note that by the time this function is called, the cache @@ -834,7 +834,7 @@ H5C__free_image_entries_array(H5C_t * cache_ptr) /* Get pointer to image entry */ ie_ptr = &((cache_ptr->image_entries)[u]); - /* Sanity checks */ + /* Sanity checks */ HDassert(ie_ptr); HDassert(ie_ptr->magic == H5C_IMAGE_ENTRY_T_MAGIC); HDassert(ie_ptr->image_ptr); @@ -866,14 +866,14 @@ H5C__free_image_entries_array(H5C_t * cache_ptr) /*------------------------------------------------------------------------- * Function: H5C_force_cache_image_load() * - * Purpose: On rare occasions, it is necessary to run + * Purpose: On rare occasions, it is necessary to run * H5MF_tidy_self_referential_fsm_hack() prior to the first - * metadata cache access. This is a problem as if there is a - * cache image at the end of the file, that routine will + * metadata cache access. This is a problem as if there is a + * cache image at the end of the file, that routine will * discard it. * * We solve this issue by calling this function, which will - * load the cache image and then call + * load the cache image and then call * H5MF_tidy_self_referential_fsm_hack() to discard it. * * Return: SUCCEED on success, and FAIL on failure. 
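The flush dependency restore referenced in the comments above appears only in fragments in this diff. Condensed, it is roughly the sketch below -- not the verbatim function body. It assumes the NULL-terminated fd_children array saved by H5C__destroy_pf_entry_child_flush_deps(), and borrows the protect/unprotect macro pattern that appears verbatim in H5C__reconstruct_cache_contents() later in this file:

    /* Sketch: recreate each child flush dependency on the deserialized
     * entry. The entry must appear protected while
     * H5C_create_flush_dependency() runs, or its sanity checks will trip;
     * unprotect it again once the children are re-attached.
     */
    if(fd_children != NULL) {
        H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, ds_entry_ptr, FAIL)
        ds_entry_ptr->is_protected = TRUE;

        i = 0;
        while(fd_children[i] != NULL) {
            if(H5C_create_flush_dependency(ds_entry_ptr, fd_children[i]) < 0)
                HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Can't restore child flush dependency")
            i++;
        } /* end while */

        H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, ds_entry_ptr, FAIL)
        ds_entry_ptr->is_protected = FALSE;
    } /* end if */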
@@ -1053,12 +1053,12 @@ H5C__read_cache_image(H5F_t *f, H5C_t *cache_ptr) if ( ( NULL == aux_ptr ) || ( aux_ptr->mpi_rank == 0 ) ) { - HDassert((NULL == aux_ptr) || + HDassert((NULL == aux_ptr) || (aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC)); #endif /* H5_HAVE_PARALLEL */ /* Read the buffer (if serial access, or rank 0 of parallel access) */ - if(H5F_block_read(f, H5FD_MEM_SUPER, cache_ptr->image_addr, + if(H5F_block_read(f, H5FD_MEM_SUPER, cache_ptr->image_addr, cache_ptr->image_len, cache_ptr->image_buffer) < 0) HGOTO_ERROR(H5E_CACHE, H5E_READERROR, FAIL, "Can't read metadata cache image block") @@ -1068,9 +1068,9 @@ H5C__read_cache_image(H5F_t *f, H5C_t *cache_ptr) if ( aux_ptr ) { /* Broadcast cache image */ - if ( MPI_SUCCESS != - (mpi_result = MPI_Bcast(cache_ptr->image_buffer, - (int)cache_ptr->image_len, MPI_BYTE, + if ( MPI_SUCCESS != + (mpi_result = MPI_Bcast(cache_ptr->image_buffer, + (int)cache_ptr->image_len, MPI_BYTE, 0, aux_ptr->mpi_comm)) ) HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result) @@ -1080,9 +1080,9 @@ H5C__read_cache_image(H5F_t *f, H5C_t *cache_ptr) else if ( aux_ptr ) { /* Retrieve the contents of the metadata cache image from process 0 */ - if ( MPI_SUCCESS != - (mpi_result = MPI_Bcast(cache_ptr->image_buffer, - (int)cache_ptr->image_len, MPI_BYTE, + if ( MPI_SUCCESS != + (mpi_result = MPI_Bcast(cache_ptr->image_buffer, + (int)cache_ptr->image_len, MPI_BYTE, 0, aux_ptr->mpi_comm)) ) HMPI_GOTO_ERROR(FAIL, "can't receive cache image MPI_Bcast", \ @@ -1128,14 +1128,14 @@ H5C__load_cache_image(H5F_t *f) HDassert(cache_ptr); HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - /* If the image address is defined, load the image, decode it, - * and insert its contents into the metadata cache. + /* If the image address is defined, load the image, decode it, + * and insert its contents into the metadata cache. * - * Note that under normal operating conditions, it is an error if the + * Note that under normal operating conditions, it is an error if the * image address is HADDR_UNDEF. However, to facilitate testing, * we allow this special value of the image address which means that - * no image exists, and that the load operation should be skipped - * silently. + * no image exists, and that the load operation should be skipped + * silently. */ if(H5F_addr_defined(cache_ptr->image_addr)) { /* Sanity checks */ @@ -1184,25 +1184,25 @@ done: /*------------------------------------------------------------------------- * Function: H5C_load_cache_image_on_next_protect() * - * Purpose: Note the fact that a metadata cache image superblock + * Purpose: Note the fact that a metadata cache image superblock * extension message exists, along with the base address * and length of the metadata cache image block. * - * Once this notification is received the metadata cache - * image block must be read, decoded, and loaded into the + * Once this notification is received the metadata cache + * image block must be read, decoded, and loaded into the * cache on the next call to H5C_protect(). * - * Further, if the file is opened R/W, the metadata cache - * image superblock extension message must be deleted from + * Further, if the file is opened R/W, the metadata cache + * image superblock extension message must be deleted from * the superblock extension and the image block freed * * Contrawise, if the file is openened R/O, the metadata * cache image superblock extension message and image block * must be left as is. 
Further, any dirty entries in the - * cache image block must be marked as clean to avoid + * cache image block must be marked as clean to avoid * attempts to write them on file close. * - * Return: SUCCEED + * Return: SUCCEED * * Programmer: John Mainzer * 7/6/15 @@ -1291,41 +1291,41 @@ H5C__image_entry_cmp(const void *_entry1, const void *_entry2) /*------------------------------------------------------------------------- * Function: H5C__prep_image_for_file_close * - * Purpose: The objective of the call is to allow the metadata cache - * to do any preparatory work prior to generation of a + * Purpose: The objective of the call is to allow the metadata cache + * to do any preparatory work prior to generation of a * cache image. * - * In particular, the cache must + * In particular, the cache must * * 1) serialize all its entries, * - * 2) compute the size of the metadata cache image, + * 2) compute the size of the metadata cache image, * * 3) allocate space for the metadata cache image, and * * 4) setup the metadata cache image superblock extension - * message with the address and size of the metadata + * message with the address and size of the metadata * cache image. * - * The parallel case is complicated by the fact that - * while all metadata caches must contain the same set of - * dirty entries, there is no such requirement for clean + * The parallel case is complicated by the fact that + * while all metadata caches must contain the same set of + * dirty entries, there is no such requirement for clean * entries or the order that entries appear in the LRU. * * Thus, there is no requirement that different processes * will construct cache images of the same size. * - * This is not a major issue as long as all processes include - * the same set of dirty entries in the cache -- as they - * currently do (note that this will change when we implement - * the ageout feature). Since only the process zero cache - * writes the cache image, all that is necessary is to - * broadcast the process zero cache size for use in the - * superblock extension messages and cache image block + * This is not a major issue as long as all processes include + * the same set of dirty entries in the cache -- as they + * currently do (note that this will change when we implement + * the ageout feature). Since only the process zero cache + * writes the cache image, all that is necessary is to + * broadcast the process zero cache size for use in the + * superblock extension messages and cache image block * allocations. * - * Note: At present, cache image is disabled in the - * parallel case as the new collective metadata write + * Note: At present, cache image is disabled in the + * parallel case as the new collective metadata write * code must be modified to support cache image. * * Return: Non-negative on success/Negative on failure @@ -1354,8 +1354,8 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated) HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); HDassert(image_generated); - /* If the file is opened and closed without any access to - * any group or data set, it is possible that the cache image (if + /* If the file is opened and closed without any access to + * any group or data set, it is possible that the cache image (if * it exists) has not been read yet. Do this now if required. 
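Stripped of the #ifdef scaffolding, the H5C__read_cache_image() hunks above reduce to the usual rank-0-reads, everyone-receives pattern. A condensed sketch, using only the calls visible in those hunks (error handling abbreviated):

    /* Only rank 0 (or a serial process, aux_ptr == NULL) touches the file */
    if(NULL == aux_ptr || 0 == aux_ptr->mpi_rank)
        if(H5F_block_read(f, H5FD_MEM_SUPER, cache_ptr->image_addr,
                cache_ptr->image_len, cache_ptr->image_buffer) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_READERROR, FAIL, "Can't read metadata cache image block")

#ifdef H5_HAVE_PARALLEL
    /* All ranks, sender and receivers alike, make the same Bcast call */
    if(aux_ptr)
        if(MPI_SUCCESS != (mpi_result = MPI_Bcast(cache_ptr->image_buffer,
                (int)cache_ptr->image_len, MPI_BYTE, 0, aux_ptr->mpi_comm)))
            HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
#endif /* H5_HAVE_PARALLEL */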
*/ if(cache_ptr->load_image) { @@ -1365,15 +1365,15 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated) } /* end if */ /* Before we start to generate the cache image (if requested), verify - * that the superblock supports superblock extension messages, and + * that the superblock supports superblock extension messages, and * silently cancel any request for a cache image if it does not. * * Ideally, we would do this when the cache image is requested, - * but the necessary information is not necessary available at that + * but the necessary information is not necessary available at that * time -- hence this last minute check. * - * Note that under some error conditions, the superblock will be - * undefined in this case as well -- if so, assume that the + * Note that under some error conditions, the superblock will be + * undefined in this case as well -- if so, assume that the * superblock does not support superblock extension messages. * Also verify that the file's high_bound is at least release * 1.10.x, otherwise cancel the request for a cache image @@ -1390,17 +1390,17 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated) /* Generate the cache image, if requested */ if(cache_ptr->image_ctl.generate_image) { /* Create the cache image super block extension message. - * + * * Note that the base address and length of the metadata cache * image are undefined at this point, and thus will have to be * updated later. * - * Create the super block extension message now so that space + * Create the super block extension message now so that space * is allocated for it (if necessary) before we allocate space * for the cache image block. * - * To simplify testing, do this only if the - * H5C_CI__GEN_MDCI_SBE_MESG bit is set in + * To simplify testing, do this only if the + * H5C_CI__GEN_MDCI_SBE_MESG bit is set in * cache_ptr->image_ctl.flags. */ if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDCI_SBE_MESG) @@ -1411,20 +1411,20 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated) if(H5C__serialize_cache(f) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "serialization of the cache failed") - /* Scan the cache and record data needed to construct the + /* Scan the cache and record data needed to construct the * cache image. In particular, for each entry we must record: * * 1) rank in LRU (if entry is in LRU) * - * 2) Whether the entry is dirty prior to flush of + * 2) Whether the entry is dirty prior to flush of * cache just prior to close. * * 3) Addresses of flush dependency parents (if any). * - * 4) Number of flush dependency children (if any). + * 4) Number of flush dependency children (if any). * - * In passing, also compute the size of the metadata cache - * image. With the recent modifications of the free space + * In passing, also compute the size of the metadata cache + * image. With the recent modifications of the free space * manager code, this size should be correct. */ if(H5C__prep_for_file_close__scan_entries(f, cache_ptr) < 0) @@ -1432,7 +1432,7 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated) HDassert(HADDR_UNDEF == cache_ptr->image_addr); #ifdef H5_HAVE_PARALLEL - /* In the parallel case, overwrite the image_len with the + /* In the parallel case, overwrite the image_len with the * value computed by process 0. 
*/ if(cache_ptr->aux_ptr) { /* we have multiple processes */ @@ -1453,17 +1453,17 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated) else { if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&p0_image_len, 1, MPI_UNSIGNED, 0, aux_ptr->mpi_comm))) HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result) - + aux_ptr->p0_image_len = p0_image_len; } /* end else */ - /* Allocate space for a cache image of size equal to that - * computed by the process 0. This may be different from + /* Allocate space for a cache image of size equal to that + * computed by the process 0. This may be different from * cache_ptr->image_data_len if mpi_rank != 0. However, since - * cache image write is suppressed on all processes other than + * cache image write is suppressed on all processes other than * process 0, this doesn't matter. * - * Note that we allocate the cache image directly from the file + * Note that we allocate the cache image directly from the file * driver so as to avoid unsettling the free space managers. */ if(HADDR_UNDEF == (cache_ptr->image_addr = H5FD_alloc(f->shared->lf, H5FD_MEM_SUPER, f, @@ -1472,8 +1472,8 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated) } /* end if */ else #endif /* H5_HAVE_PARALLEL */ - /* Allocate the cache image block. Note that we allocate this - * this space directly from the file driver so as to avoid + /* Allocate the cache image block. Note that we allocate this + * this space directly from the file driver so as to avoid * unsettling the free space managers. */ if(HADDR_UNDEF == (cache_ptr->image_addr = H5FD_alloc(f->shared->lf, H5FD_MEM_SUPER, f, @@ -1497,25 +1497,25 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated) * image block to the next alignment boundary, and then setting * the image_data_len to the actual size of the cache_image. * - * On the off chance that there is some other way to get a + * On the off chance that there is some other way to get a * a fragment on a cache image allocation, leave the following * assertion in the code so we will find out. */ HDassert((eoa_frag_size == 0) || (f->shared->alignment != 1)); /* Eventually it will be possible for the length of the cache image - * block on file to be greater than the size of the data it - * contains. However, for now they must be the same. Set + * block on file to be greater than the size of the data it + * contains. However, for now they must be the same. Set * cache_ptr->image_len accordingly. */ cache_ptr->image_len = cache_ptr->image_data_len; - /* update the metadata cache image superblock extension - * message with the new cache image block base address and + /* update the metadata cache image superblock extension + * message with the new cache image block base address and * length. * - * to simplify testing, do this only if the - * H5C_CI__GEN_MDC_IMAGE_BLK bit is set in + * to simplify testing, do this only if the + * H5C_CI__GEN_MDC_IMAGE_BLK bit is set in * cache_ptr->image_ctl.flags. 
*/ if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDC_IMAGE_BLK) @@ -1525,18 +1525,18 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated) /* At this point: * * 1) space in the file for the metadata cache image - * is allocated, + * is allocated, * - * 2) the metadata cache image superblock extension - * message exists and (if so configured) contains + * 2) the metadata cache image superblock extension + * message exists and (if so configured) contains * the correct data, * - * 3) All entries in the cache that will appear in the + * 3) All entries in the cache that will appear in the * cache image are serialized with up to date images. * * Since we just updated the cache image message, * the super block extension message is dirty. However, - * since the superblock and the superblock extension + * since the superblock and the superblock extension * can't be included in the cache image, this is a non- * issue. * @@ -1544,16 +1544,16 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated) * the cache are marked as such, and we have a count * of same. * - * 5) Flush dependency heights are calculated for all + * 5) Flush dependency heights are calculated for all * entries that will be included in the cache image. * * If there are any entries to be included in the metadata cache - * image, allocate, populate, and sort the image_entries array. + * image, allocate, populate, and sort the image_entries array. * - * If the metadata cache image will be empty, delete the - * metadata cache image superblock extension message, set + * If the metadata cache image will be empty, delete the + * metadata cache image superblock extension message, set * cache_ptr->image_ctl.generate_image to FALSE. This will - * allow the file close to continue normally without the + * allow the file close to continue normally without the * unnecessary generation of the metadata cache image. */ if(cache_ptr->num_entries_in_image > 0) { @@ -1567,9 +1567,9 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated) else { /* cancel creation of metadata cache image */ HDassert(cache_ptr->image_entries == NULL); - /* To avoid breaking the control flow tests, only delete - * the mdci superblock extension message if the - * H5C_CI__GEN_MDC_IMAGE_BLK flag is set in + /* To avoid breaking the control flow tests, only delete + * the mdci superblock extension message if the + * H5C_CI__GEN_MDC_IMAGE_BLK flag is set in * cache_ptr->image_ctl.flags. */ if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDC_IMAGE_BLK) @@ -1591,8 +1591,8 @@ done: /*------------------------------------------------------------------------- * Function: H5C_set_cache_image_config * - * Purpose: If *config_ptr contains valid data, copy it into the - * image_ctl field of *cache_ptr. Make adjustments for + * Purpose: If *config_ptr contains valid data, copy it into the + * image_ctl field of *cache_ptr. Make adjustments for * changes in configuration as required. * * If the file is open read only, silently @@ -1601,9 +1601,9 @@ done: * * Note that in addition to being inapplicable in the * read only case, cache image is also inapplicable if - * the superblock does not support superblock extension - * messages. Unfortunately, this information need not - * be available at this point. Thus we check for this + * the superblock does not support superblock extension + * messages. Unfortunately, this information need not + * be available at this point. 
Thus we check for this * later, in H5C_prep_for_file_close() and cancel the * cache image request if appropriate. * @@ -1638,7 +1638,7 @@ H5C_set_cache_image_config(const H5F_t *f, H5C_t *cache_ptr, HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "invalid cache image configuration") #ifdef H5_HAVE_PARALLEL - /* The collective metadata write code is not currently compatible + /* The collective metadata write code is not currently compatible * with cache image. Until this is fixed, suppress cache image silently * if there is more than one process. * JRM -- 11/8/16 @@ -1652,15 +1652,15 @@ H5C_set_cache_image_config(const H5F_t *f, H5C_t *cache_ptr, else { #endif /* H5_HAVE_PARALLEL */ /* A cache image can only be generated if the file is opened read / write - * and the superblock supports superblock extension messages. + * and the superblock supports superblock extension messages. * - * However, the superblock version is not available at this point -- + * However, the superblock version is not available at this point -- * hence we can only check the former requirement now. Do the latter - * check just before we construct the image.. + * check just before we construct the image.. * * If the file is opened read / write, apply the supplied configuration. * - * If it is not, set the image configuration to the default, which has + * If it is not, set the image configuration to the default, which has * the effect of silently disabling the cache image if it was requested. */ if(H5F_INTENT(f) & H5F_ACC_RDWR) @@ -1683,7 +1683,7 @@ done: /*------------------------------------------------------------------------- * Function: H5C_validate_cache_image_config() * - * Purpose: Run a sanity check on the provided instance of struct + * Purpose: Run a sanity check on the provided instance of struct * H5AC_cache_image_config_t. * * Do nothing and return SUCCEED if no errors are detected, @@ -1715,8 +1715,8 @@ H5C_validate_cache_image_config(H5C_cache_image_ctl_t * ctl_ptr) if(ctl_ptr->save_resize_status != FALSE) HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unexpected value in save_resize_status field") - /* At present, we do not support prefetched entry ageouts. Thus - * the entry_ageout field must be set to + /* At present, we do not support prefetched entry ageouts. Thus + * the entry_ageout field must be set to * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE. */ if(ctl_ptr->entry_ageout != H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE) @@ -1740,7 +1740,7 @@ done: * Purpose: Compute the size of the header of the metadata cache * image block, and return the value. * - * Return: Size of the header section of the metadata cache image + * Return: Size of the header section of the metadata cache image * block in bytes. * * Programmer: John Mainzer @@ -1777,7 +1777,7 @@ H5C__cache_image_block_entry_header_size(const H5F_t * f) * Purpose: Compute the size of the header of the metadata cache * image block, and return the value. * - * Return: Size of the header section of the metadata cache image + * Return: Size of the header section of the metadata cache image * block in bytes. * * Programmer: John Mainzer @@ -1806,9 +1806,9 @@ H5C__cache_image_block_header_size(const H5F_t * f) /*------------------------------------------------------------------------- * Function: H5C__decode_cache_image_header() * - * Purpose: Decode the metadata cache image buffer header from the + * Purpose: Decode the metadata cache image buffer header from the * supplied buffer and load the data into the supplied instance - * of H5C_t. 
Advances the buffer pointer to the first byte
+ * of H5C_t. Advances the buffer pointer to the first byte
 * after the header image, or unchanged on failure.
 *
 * Return: Non-negative on success/Negative on failure
@@ -1853,7 +1853,7 @@ H5C__decode_cache_image_header(const H5F_t *f, H5C_t *cache_ptr,
 /* Decode flags */
 flags = *p++;
- if(flags & H5C__MDCI_HEADER_HAVE_RESIZE_STATUS)
+ if(flags & H5C__MDCI_HEADER_HAVE_RESIZE_STATUS)
 have_resize_status = TRUE;
 if(have_resize_status)
 HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "MDC resize status not yet supported")
@@ -1867,7 +1867,7 @@ H5C__decode_cache_image_header(const H5F_t *f, H5C_t *cache_ptr,
 /* Read num entries */
 UINT32DECODE(p, cache_ptr->num_entries_in_image);
- if(cache_ptr->num_entries_in_image == 0)
+ if(cache_ptr->num_entries_in_image == 0)
 HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad metadata cache entry count")
 /* Verify expected length of header */
@@ -1888,13 +1888,13 @@ done:
 /*-------------------------------------------------------------------------
 * Function: H5C__decode_cache_image_entry()
 *
- * Purpose: Decode the metadata cache image entry from the supplied
+ * Purpose: Decode the metadata cache image entry from the supplied
 * buffer into the supplied instance of H5C_image_entry_t.
 * This includes allocating a buffer for the entry image,
- * loading it, and setting ie_ptr->image_ptr to point to
+ * loading it, and setting ie_ptr->image_ptr to point to
 * the buffer.
 *
- * Advances the buffer pointer to the first byte
+ * Advances the buffer pointer to the first byte
 * after the entry, or unchanged on failure.
 *
 * Return: Non-negative on success/Negative on failure
@@ -1998,8 +1998,8 @@ H5C__decode_cache_image_entry(const H5F_t *f, const H5C_t *cache_ptr,
 /* Verify expected length of entry image */
 if((size_t)(p - *buf) != H5C__cache_image_block_entry_header_size(f))
 HGOTO_ERROR(H5E_CACHE, H5E_BADSIZE, FAIL, "Bad entry image len")
-
- /* If parent count greater than zero, allocate array for parent
+
+ /* If parent count greater than zero, allocate array for parent
 * addresses, and decode addresses into the array.
 */
 if(fd_parent_count > 0) {
@@ -2053,9 +2053,9 @@ done:
 /*-------------------------------------------------------------------------
 * Function: H5C__destroy_pf_entry_child_flush_deps()
 *
- * Purpose: Destroy all flush dependencies in which the supplied
+ * Purpose: Destroy all flush dependencies in which the supplied
 * prefetched entry is the parent. Note that the children
- * in these flush dependencies must be prefetched entries as
+ * in these flush dependencies must be prefetched entries as
 * well.
 *
 * As this action is part of the process of transferring all
@@ -2063,8 +2063,8 @@ done:
 * prefetched entry, ensure that the data necessary to complete
 * the transfer is retained.
 *
- * Note: The current implementation of this function is
- * quite inefficient -- mostly due to the current
+ * Note: The current implementation of this function is
+ * quite inefficient -- mostly due to the current
 * implementation of flush dependencies. This should
 * be fixed at some point. 
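One convention in H5C__decode_cache_image_header() above deserves a note: the decoder walks a local pointer across the buffer and only publishes it back to the caller after checking that the distance walked matches the computed header size. A sketch assembled from the calls visible in the hunks (the size helper and error strings here are illustrative, not verbatim):

    const uint8_t *p = *buf;    /* local cursor; *buf is untouched on failure */
    uint8_t flags;

    /* ... signature and version checks elided ... */
    flags = *p++;                                       /* one flags byte */
    UINT32DECODE(p, cache_ptr->num_entries_in_image);   /* 4-byte count   */
    if(cache_ptr->num_entries_in_image == 0)
        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad metadata cache entry count")

    /* Verify that exactly the advertised number of bytes was consumed */
    if((size_t)(p - *buf) != H5C__cache_image_block_header_size(f))
        HGOTO_ERROR(H5E_CACHE, H5E_BADSIZE, FAIL, "Bad header image len")

    *buf = p;   /* success: advance the caller's buffer pointer */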
* @@ -2076,7 +2076,7 @@ done: *------------------------------------------------------------------------- */ static herr_t -H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, +H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_entry_ptr, H5C_cache_entry_t **fd_children) { H5C_cache_entry_t * entry_ptr; @@ -2103,8 +2103,8 @@ H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, while(entry_ptr != NULL) { HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - /* Here we look at entry_ptr->flush_dep_nparents and not - * entry_ptr->fd_parent_count as it is possible that some + /* Here we look at entry_ptr->flush_dep_nparents and not + * entry_ptr->fd_parent_count as it is possible that some * or all of the prefetched flush dependency child relationships * have already been destroyed. */ @@ -2145,9 +2145,9 @@ H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "can't destroy pf entry child flush dependency") #ifndef NDEBUG - /* Sanity check -- verify that the address of the parent + /* Sanity check -- verify that the address of the parent * appears in entry_ptr->fd_parent_addrs. Must do a search, - * as with flush dependency creates and destroys, + * as with flush dependency creates and destroys, * entry_ptr->fd_parent_addrs and entry_ptr->flush_dep_parent * can list parents in different order. */ @@ -2181,8 +2181,8 @@ done: /*------------------------------------------------------------------------- * Function: H5C__encode_cache_image_header() * - * Purpose: Encode the metadata cache image buffer header in the - * supplied buffer. Updates buffer pointer to the first byte + * Purpose: Encode the metadata cache image buffer header in the + * supplied buffer. Updates buffer pointer to the first byte * after the header image in the buffer, or unchanged on failure. * * Return: Non-negative on success/Negative on failure @@ -2259,8 +2259,8 @@ done: /*------------------------------------------------------------------------- * Function: H5C__encode_cache_image_entry() * - * Purpose: Encode the metadata cache image buffer header in the - * supplied buffer. Updates buffer pointer to the first byte + * Purpose: Encode the metadata cache image buffer header in the + * supplied buffer. Updates buffer pointer to the first byte * after the entry in the buffer, or unchanged on failure. * * Return: Non-negative on success/Negative on failure @@ -2271,7 +2271,7 @@ done: *------------------------------------------------------------------------- */ static herr_t -H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, uint8_t **buf, +H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, uint8_t **buf, unsigned entry_num) { H5C_image_entry_t * ie_ptr; /* Pointer to entry to encode */ @@ -2306,13 +2306,13 @@ H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, uint8_t **buf, *p++ = (uint8_t)(ie_ptr->type_id); /* Compose and encode flags */ - if(ie_ptr->is_dirty) + if(ie_ptr->is_dirty) flags |= H5C__MDCI_ENTRY_DIRTY_FLAG; - if(ie_ptr->lru_rank > 0) + if(ie_ptr->lru_rank > 0) flags |= H5C__MDCI_ENTRY_IN_LRU_FLAG; if(ie_ptr->fd_child_count > 0) flags |= H5C__MDCI_ENTRY_IS_FD_PARENT_FLAG; - if(ie_ptr->fd_parent_count > 0) + if(ie_ptr->fd_parent_count > 0) flags |= H5C__MDCI_ENTRY_IS_FD_CHILD_FLAG; *p++ = flags; @@ -2377,51 +2377,51 @@ done: * flush dependency children. 
(Recall that the flush dependency * height of an entry in a flush dependency relationship is the * length of the longest path from the entry to a leaf entry -- - * that is an entry with flush dependency parents, but no - * flush dependency children. With the introduction of the + * that is an entry with flush dependency parents, but no + * flush dependency children. With the introduction of the * possibility of multiple flush dependency parents, we have - * a flush partial dependency latice, not a flush dependency - * tree. But since the partial latice is acyclic, the concept + * a flush partial dependency latice, not a flush dependency + * tree. But since the partial latice is acyclic, the concept * of flush dependency height still makes sense. * - * The purpose of this function is to compute the flush + * The purpose of this function is to compute the flush * dependency height of all entries that appear in the cache - * image. + * image. * - * At present, entries are included or excluded from the + * At present, entries are included or excluded from the * cache image depending upon the ring in which they reside. * Thus there is no chance that one side of a flush dependency * will be in the cache image, and the other side not. * * However, once we start placing a limit on the size of the * cache image, or start excluding prefetched entries from - * the cache image if they haven't been accessed in some - * number of file close / open cycles, this will no longer - * be the case. + * the cache image if they haven't been accessed in some + * number of file close / open cycles, this will no longer + * be the case. * * In particular, if a flush dependency child is dirty, and * one of its flush dependency parents is dirty and not in * the cache image, then the flush dependency child cannot * be in the cache image without violating flush ordering. * - * Observe that a clean flush dependency child can be either - * in or out of the cache image without effect on flush + * Observe that a clean flush dependency child can be either + * in or out of the cache image without effect on flush * dependencies. * - * Similarly, a flush dependency parent can always be part - * of a cache image, regardless of whether it is clean or + * Similarly, a flush dependency parent can always be part + * of a cache image, regardless of whether it is clean or * dirty -- but remember that a flush dependency parent can * also be a flush dependency child. - * + * * Finally, note that for purposes of the cache image, flush - * dependency height ends when a flush dependecy relation + * dependency height ends when a flush dependecy relation * passes off the cache image. * - * On exit, the flush dependency height of each entry in the + * On exit, the flush dependency height of each entry in the * cache image should be calculated and stored in the cache * entry. Entries will be removed from the cache image if * necessary to maintain flush ordering. - * + * * Return: Non-negative on success/Negative on failure * * Programmer: John Mainzer @@ -2447,10 +2447,10 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) HDassert(cache_ptr); HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); - /* Remove from the cache image all dirty entries that are + /* Remove from the cache image all dirty entries that are * flush dependency children of dirty entries that are not in the - * cache image. Must do this, as if we fail to do so, the parent - * will be written to file before the child. Since it is possible + * cache image. 
Must do this, as if we fail to do so, the parent + * will be written to file before the child. Since it is possible * that the child will have dirty children of its own, this may take * multiple passes through the index list. */ @@ -2484,17 +2484,17 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) entry_ptr = entry_ptr->il_next; } /* while ( entry_ptr != NULL ) */ - } /* while ( ! done ) */ + } /* while ( ! done ) */ /* at present, entries are included in the cache image if they reside - * in a specified set of rings. Thus it should be impossible for - * entries_removed_from_image to be positive. Assert that this is - * so. Note that this will change when we start aging entries out + * in a specified set of rings. Thus it should be impossible for + * entries_removed_from_image to be positive. Assert that this is + * so. Note that this will change when we start aging entries out * of the cache image. */ HDassert(entries_removed_from_image == 0); - /* Next, remove from entries in the cache image, references to + /* Next, remove from entries in the cache image, references to * flush dependency parents or children that are not in the cache image. */ entry_ptr = cache_ptr->il_head; @@ -2575,14 +2575,14 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) entry_ptr = entry_ptr->il_next; } /* while (entry_ptr != NULL) */ - /* At present, no extenal parent or child flush dependency links + /* At present, no extenal parent or child flush dependency links * should exist -- hence the following assertions. This will change * if we support ageout of entries in the cache image. */ HDassert(external_child_fd_refs_removed == 0); HDassert(external_parent_fd_refs_removed == 0); - /* At this point we should have removed all flush dependencies that + /* At this point we should have removed all flush dependencies that * cross cache image boundaries. Now compute the flush dependency * heights for all entries in the image. * @@ -2600,7 +2600,7 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr) parent_ptr = entry_ptr->flush_dep_parent[u]; HDassert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); - if(parent_ptr->include_in_image && parent_ptr->image_fd_height <= 0) + if(parent_ptr->include_in_image && parent_ptr->image_fd_height <= 0) H5C__prep_for_file_close__compute_fd_heights_real(parent_ptr, 1); } /* end for */ } /* end if */ @@ -2623,37 +2623,37 @@ done: * * The basic observation behind this function is as follows: * - * Suppose you have an entry E with a flush dependency - * height of X. Then the parents of E must all have + * Suppose you have an entry E with a flush dependency + * height of X. Then the parents of E must all have * flush dependency X + 1 or greater. * * Use this observation to compute flush dependency height * of all entries in the cache image via the following * recursive algorithm: * - * 1) On entry, set the flush dependency height of the + * 1) On entry, set the flush dependency height of the * supplied cache entry to the supplied value. * - * 2) Examine all the flush dependency parents of the - * supplied entry. + * 2) Examine all the flush dependency parents of the + * supplied entry. 
* - * If the parent is in the cache image, and has flush + * If the parent is in the cache image, and has flush * dependency height less than or equal to the flush - * dependency height of the current entry, call the + * dependency height of the current entry, call the * recursive routine on the parent with flush dependency - * height equal to the flush dependency height of the + * height equal to the flush dependency height of the * child plus 1. * * Otherwise do nothing. * * Observe that if the flush dependency height of all entries - * in the image is initialized to zero, and if this recursive - * function is called with flush dependency height 0 on all - * entries in the cache image with FD parents in the image, - * but without FD children in the image, the correct flush - * dependency height should be set for all entries in the + * in the image is initialized to zero, and if this recursive + * function is called with flush dependency height 0 on all + * entries in the cache image with FD parents in the image, + * but without FD children in the image, the correct flush + * dependency height should be set for all entries in the * cache image. - * + * * Return: void * * Programmer: John Mainzer @@ -2698,9 +2698,9 @@ H5C__prep_for_file_close__compute_fd_heights_real(H5C_cache_entry_t *entry_ptr, * Function: H5C__prep_for_file_close__setup_image_entries_array * * Purpose: Allocate space for the image_entries array, and load - * each instance of H5C_image_entry_t in the array with + * each instance of H5C_image_entry_t in the array with * the data necessary to construct the metadata cache image. - * + * * Return: Non-negative on success/Negative on failure * * Programmer: John Mainzer @@ -2758,14 +2758,14 @@ H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr) image_entries[u].ring = entry_ptr->ring; /* When a prefetched entry is included in the image, store - * its underlying type id in the image entry, not + * its underlying type id in the image entry, not * H5AC_PREFETCHED_ENTRY_ID. In passing, also increment * the age (up to H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX). */ if(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID) { image_entries[u].type_id = entry_ptr->prefetch_type_id; image_entries[u].age = entry_ptr->age + 1; - + if(image_entries[u].age > H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX) image_entries[u].age = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX; } /* end if */ @@ -2780,13 +2780,13 @@ H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr) image_entries[u].fd_parent_count = entry_ptr->fd_parent_count; image_entries[u].fd_parent_addrs = entry_ptr->fd_parent_addrs; image_entries[u].fd_child_count = entry_ptr->fd_child_count; - image_entries[u].fd_dirty_child_count = + image_entries[u].fd_dirty_child_count = entry_ptr->fd_dirty_child_count; image_entries[u].image_ptr = entry_ptr->image_ptr; - /* Null out entry_ptr->fd_parent_addrs and set + /* Null out entry_ptr->fd_parent_addrs and set * entry_ptr->fd_parent_count to zero so that ownership of the - * flush dependency parents address array is transferred to the + * flush dependency parents address array is transferred to the * image entry. 
*/
 entry_ptr->fd_parent_count = 0;
@@ -2819,8 +2819,8 @@ done:
 /*-------------------------------------------------------------------------
 * Function: H5C__prep_for_file_close__scan_entries
 *
- * Purpose: Scan all entries in the metadata cache, and store all
- * entry specific data required for construction of the
+ * Purpose: Scan all entries in the metadata cache, and store all
+ * entry specific data required for construction of the
 * metadata cache image block and likely to be discarded
 * or modified during the cache flush on file close.
 *
@@ -2838,7 +2838,7 @@ done:
 *
 * Finally, compute the size of the metadata cache image
 * block.
- *
+ *
 * Return: Non-negative on success/Negative on failure
 *
 * Programmer: John Mainzer
@@ -2891,7 +2891,7 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr)
 HDassert(entry_ptr->image_ptr);
 /* Initially, we mark all entries in the rings included
- * in the cache image as being included in the
+ * in the cache image as being included in the
 * image. Depending on circumstances, we may exclude some
 * of these entries later.
 */
@@ -2907,7 +2907,7 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr)
 entry_ptr->image_fd_height = 0; /* will compute this later */
 /* Initially, include all flush dependency parents in the
- * list of flush dependencies to be stored in the
+ * list of flush dependencies to be stored in the
 * image. We may remove some or all of these later.
 */
 if(entry_ptr->flush_dep_nparents > 0) {
@@ -2915,7 +2915,7 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr)
 * as needed.
 */
 if(entry_ptr->flush_dep_nparents == entry_ptr->fd_parent_count ) {
- /* parent addresses array should already be allocated
+ /* parent addresses array should already be allocated
 * and of the correct size.
 */
 HDassert(entry_ptr->fd_parent_addrs);
@@ -2947,8 +2947,8 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr)
 HDassert(entry_ptr->fd_parent_addrs == NULL);
 /* Initially, all flush dependency children are included in
- * the count of flush dependency child relationships to be
- * represented in the cache image. Some or all of these
+ * the count of flush dependency child relationships to be
+ * represented in the cache image. Some or all of these
 * may be dropped from the image later.
 */
 if(entry_ptr->flush_dep_nchildren > 0) {
@@ -2970,23 +2970,23 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr)
 /* Now compute the flush dependency heights of all flush dependency
 * relationships to be represented in the image.
 *
- * If all entries in the target rings are included in the
- * image, the flush dependency heights are simply the heights
+ * If all entries in the target rings are included in the
+ * image, the flush dependency heights are simply the heights
 * of all flush dependencies in the target rings.
 *
- * However, if we restrict appearance in the cache image either
- * by number of entries in the image, restrictions on the number
- * of times a prefetched entry can appear in an image, or image
+ * However, if we restrict appearance in the cache image either
+ * by number of entries in the image, restrictions on the number
+ * of times a prefetched entry can appear in an image, or image
 * size, it is possible that flush dependency parents or children
 * of entries that are in the image may not be included in
- * the image. In this case, we must prune all flush dependency
In this case, we must prune all flush dependency - * relationships that cross the image boundary, and all exclude - * from the image all dirty flush dependency children that have - * a dirty flush dependency parent that is not in the image. + * the image. In this case, we must prune all flush dependency + * relationships that cross the image boundary, and all exclude + * from the image all dirty flush dependency children that have + * a dirty flush dependency parent that is not in the image. * This is necessary to preserve the required flush ordering. - * - * These details are tended to by the following call to - * H5C__prep_for_file_close__compute_fd_heights(). Because the + * + * These details are tended to by the following call to + * H5C__prep_for_file_close__compute_fd_heights(). Because the * exact contents of the image cannot be known until after this * call, computation of the image size is delayed. */ @@ -2994,7 +2994,7 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "computation of flush dependency heights failed?!?") /* At this point, all entries that will appear in the cache - * image should be marked correctly. Compute the size of the + * image should be marked correctly. Compute the size of the * cache image. */ entries_visited = 0; @@ -3031,11 +3031,11 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr) /* Now scan the LRU list to set the lru_rank fields of all entries * on the LRU. * - * Note that we start with rank 1, and increment by 1 with each - * entry on the LRU. + * Note that we start with rank 1, and increment by 1 with each + * entry on the LRU. * * Note that manually pinned entryies will have lru_rank -1, - * and no flush dependency. Putting these entries at the head of + * and no flush dependency. Putting these entries at the head of * the reconstructed LRU should be appropriate. */ entry_ptr = cache_ptr->LRU_head_ptr; @@ -3045,11 +3045,11 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr) /* to avoid confusion, don't set lru_rank on epoch markers. * Note that we still increment the lru_rank, so that the holes - * in the sequence of entries on the LRU will indicate the - * locations of epoch markers (if any) when we reconstruct + * in the sequence of entries on the LRU will indicate the + * locations of epoch markers (if any) when we reconstruct * the LRU. * - * Do not set lru_rank or increment lru_rank for entries + * Do not set lru_rank or increment lru_rank for entries * that will not be included in the cache image. */ if(entry_ptr->type->id == H5AC_EPOCH_MARKER_ID) @@ -3076,9 +3076,9 @@ done: * Function: H5C__reconstruct_cache_contents() * * Purpose: Scan the image buffer, and create a prefetched - * cache entry for every entry in the buffer. Insert the - * prefetched entries in the index and the LRU, and - * reconstruct any flush dependencies. Order the entries + * cache entry for every entry in the buffer. Insert the + * prefetched entries in the index and the LRU, and + * reconstruct any flush dependencies. Order the entries * in the LRU as indicated by the stored lru_ranks. * * Return: SUCCEED on success, and FAIL on failure. 
@@ -3127,11 +3127,11 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) if(NULL == (pf_entry_ptr = H5C__reconstruct_cache_entry(f, cache_ptr, &p))) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "reconstruction of cache entry failed") - /* Note that we make no checks on available cache space before + /* Note that we make no checks on available cache space before * inserting the reconstructed entry into the metadata cache. * * This is OK since the cache must be almost empty at the beginning - * of the process, and since we check cache size at the end of the + * of the process, and since we check cache size at the end of the * reconstruction process. */ @@ -3147,7 +3147,7 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, pf_entry_ptr->is_dirty) - /* If the prefetched entry is the child in one or more flush + /* If the prefetched entry is the child in one or more flush * dependency relationships, recreate those flush dependencies. */ for(v = 0; v < pf_entry_ptr->fd_parent_count; v++) { @@ -3171,7 +3171,7 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) */ H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, parent_ptr, FAIL) parent_ptr->is_protected = TRUE; - + /* Setup the flush dependency */ if(H5C_create_flush_dependency(parent_ptr, pf_entry_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Can't restore flush dependency") @@ -3211,7 +3211,7 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) pf_entry_ptr = pf_entry_ptr->il_next; } /* end while */ - /* Scan the LRU, and verify the expected ordering of the + /* Scan the LRU, and verify the expected ordering of the * prefetched entries. */ { @@ -3233,7 +3233,7 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) HDassert((entry_ptr->lru_rank == -1) || (entry_ptr->lru_rank > i)); - if ( ( entry_ptr->lru_rank > 1 ) && + if ( ( entry_ptr->lru_rank > 1 ) && ( entry_ptr->lru_rank > i + 1 ) ) lru_rank_holes += entry_ptr->lru_rank - (i + 1); @@ -3245,9 +3245,9 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) entry_ptr = entry_ptr->next; } /* end while */ - /* Holes in the sequences of LRU ranks can appear due to epoch + /* Holes in the sequences of LRU ranks can appear due to epoch * markers. They are left in to allow re-insertion of the - * epoch markers on reconstruction of the cache -- thus + * epoch markers on reconstruction of the cache -- thus * the following sanity check will have to be revised when * we add code to store and restore adaptive resize status. */ @@ -3255,7 +3255,7 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr) } /* end block */ #endif /* NDEBUG */ - /* Check to see if the cache is oversize, and evict entries as + /* Check to see if the cache is oversize, and evict entries as * necessary to remain within limits. */ if(cache_ptr->index_size >= cache_ptr->max_cache_size) { @@ -3289,7 +3289,7 @@ done: * Return a pointer to the newly allocated cache entry, * or NULL on failure. * - * Return: Pointer to the new instance of H5C_cache_entry on success, + * Return: Pointer to the new instance of H5C_cache_entry on success, * or NULL on failure. 
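The lru_rank scheme that ties the scan in H5C__prep_for_file_close__scan_entries() to the verification loop above is easy to lose in the diff noise. The assignment pass reduces to roughly the following sketch (field names as used in the surrounding hunks; the non-marker branch is inferred from the comments, so treat it as illustrative):

    /* Ranks start at 1 and grow toward the LRU tail. Epoch markers
     * consume a rank without recording it (the resulting hole marks the
     * marker's position); entries excluded from the image are skipped.
     */
    lru_rank = 1;
    entry_ptr = cache_ptr->LRU_head_ptr;
    while(entry_ptr != NULL) {
        if(entry_ptr->type->id == H5AC_EPOCH_MARKER_ID)
            lru_rank++;
        else if(entry_ptr->include_in_image) {
            entry_ptr->lru_rank = lru_rank;
            lru_rank++;
        } /* end else-if */

        entry_ptr = entry_ptr->next;
    } /* end while */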
* * Programmer: John Mainzer @@ -3347,7 +3347,7 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, is_fd_child = TRUE; #endif /* NDEBUG */ /* only used in assertions */ - /* Force dirty entries to clean if the file read only -- must do + /* Force dirty entries to clean if the file read only -- must do * this as otherwise the cache will attempt to write them on file * close. Since the file is R/O, the metadata cache image superblock * extension message and the cache image block will not be removed. @@ -3355,7 +3355,7 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, * * However, if the dirty entry (marked clean for purposes of the R/O * file open) is evicted and then referred to, the cache will read - * either invalid or obsolete data from the file. Handle this by + * either invalid or obsolete data from the file. Handle this by * setting the prefetched_dirty field, and hiding such entries from * the eviction candidate selection algorithm. */ @@ -3375,7 +3375,7 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, /* Decode dirty dependency child count */ UINT16DECODE(p, pf_entry_ptr->fd_dirty_child_count); - if(!file_is_rw) + if(!file_is_rw) pf_entry_ptr->fd_dirty_child_count = 0; if(pf_entry_ptr->fd_dirty_child_count > pf_entry_ptr->fd_child_count) HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid dirty flush dependency child count") @@ -3401,8 +3401,8 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, /* Verify expected length of entry image */ if((size_t)(p - *buf) != H5C__cache_image_block_entry_header_size(f)) HGOTO_ERROR(H5E_CACHE, H5E_BADSIZE, NULL, "Bad entry image len") - - /* If parent count greater than zero, allocate array for parent + + /* If parent count greater than zero, allocate array for parent * addresses, and decode addresses into the array. */ if(pf_entry_ptr->fd_parent_count > 0) { @@ -3457,7 +3457,7 @@ done: /*------------------------------------------------------------------------- * Function: H5C__write_cache_image_superblock_msg * - * Purpose: Write the cache image superblock extension message, + * Purpose: Write the cache image superblock extension message, * creating if specified. 
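
The decode logic above follows one pattern per field: read a fixed-width value, clamp it for read-only opens, and reject images whose counts are inconsistent. A self-contained sketch of that single step, where the two-byte little-endian read stands in for UINT16DECODE and the layout is illustrative rather than the actual image format:

    #include <stdbool.h>
    #include <stdint.h>

    /* Decode and validate a dirty-child count, mirroring the pattern in
     * H5C__reconstruct_cache_entry(): on a read-only open every child is
     * treated as clean, and a dirty count exceeding the child count means
     * the image is corrupt.
     */
    static bool decode_dirty_child_count(const uint8_t **pp,
                                         uint16_t fd_child_count,
                                         bool file_is_rw, uint16_t *out)
    {
        const uint8_t *p = *pp;
        uint16_t dirty = (uint16_t)(p[0] | ((uint16_t)p[1] << 8));

        p += 2;
        if (!file_is_rw)
            dirty = 0;                /* R/O open: force children clean */
        if (dirty > fd_child_count)
            return false;             /* inconsistent image: reject */

        *out = dirty;
        *pp  = p;
        return true;
    }
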
* * In general, the size and location of the cache image block @@ -3560,7 +3560,7 @@ H5C__write_cache_image(H5F_t *f, const H5C_t *cache_ptr) } /* end if */ } /* end block */ #endif /* H5_HAVE_PARALLEL */ - + done: FUNC_LEAVE_NOAPI(ret_value) } /* H5C__write_cache_image() */ diff --git a/src/H5Clog.c b/src/H5Clog.c index cf9b7e8..794d6bb 100644 --- a/src/H5Clog.c +++ b/src/H5Clog.c @@ -313,7 +313,7 @@ H5C_get_logging_status(const H5C_t *cache, /*OUT*/ hbool_t *is_enabled, *------------------------------------------------------------------------- */ herr_t -H5C_log_write_create_cache_msg(H5C_t *cache, herr_t fxn_ret_value) +H5C_log_write_create_cache_msg(H5C_t *cache, herr_t fxn_ret_value) { herr_t ret_value = SUCCEED; /* Return value */ @@ -348,7 +348,7 @@ done: *------------------------------------------------------------------------- */ herr_t -H5C_log_write_destroy_cache_msg(H5C_t *cache) +H5C_log_write_destroy_cache_msg(H5C_t *cache) { herr_t ret_value = SUCCEED; /* Return value */ diff --git a/src/H5Clog_json.c b/src/H5Clog_json.c index 5ac354d..ab25a55 100644 --- a/src/H5Clog_json.c +++ b/src/H5Clog_json.c @@ -143,7 +143,7 @@ static H5C_log_class_t H5C_json_log_class_g = { /*------------------------------------------------------------------------- * Function: H5C__json_write_log_message * - * Purpose: Write a message to the log file and flush the file. + * Purpose: Write a message to the log file and flush the file. * The message string is neither modified nor freed. * * Return: SUCCEED/FAIL @@ -171,7 +171,7 @@ H5C__json_write_log_message(H5C_log_json_udata_t *json_udata) if((int)n_chars != HDfprintf(json_udata->outfile, json_udata->message)) HGOTO_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "error writing log message") HDmemset((void *)(json_udata->message), 0, (size_t)(n_chars * sizeof(char))); - + done: FUNC_LEAVE_NOAPI(ret_value) } /* H5C__json_write_log_message() */ @@ -222,7 +222,7 @@ H5C_log_json_set_up(H5C_log_info_t *log_info, const char log_location[], int mpi if(NULL == (log_info->udata = H5MM_calloc(sizeof(H5C_log_json_udata_t)))) HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed") json_udata = (H5C_log_json_udata_t *)(log_info->udata); - + /* Allocate memory for the message buffer */ if(NULL == (json_udata->message = (char *)H5MM_calloc(H5C_MAX_JSON_LOG_MSG_SIZE * sizeof(char)))) HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed") @@ -341,7 +341,7 @@ H5C__json_write_start_log_msg(void *udata) HDassert(json_udata->message); /* Create the log message string (opens the JSON array) */ - HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, + HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, "\ {\n\ \"HDF5 metadata cache log messages\" : [\n\ @@ -386,7 +386,7 @@ H5C__json_write_stop_log_msg(void *udata) HDassert(json_udata->message); /* Create the log message string (closes the JSON array) */ - HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, + HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, "\ {\ \"timestamp\":%lld,\ @@ -399,7 +399,7 @@ H5C__json_write_stop_log_msg(void *udata) /* Write the log message to the file */ if(H5C__json_write_log_message(json_udata) < 0) HGOTO_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message") - + done: FUNC_LEAVE_NOAPI(ret_value) } /* H5C__json_write_stop_log_msg() */ @@ -418,7 +418,7 @@ done: *------------------------------------------------------------------------- */ static herr_t -H5C__json_write_create_cache_log_msg(void *udata, herr_t fxn_ret_value) 
+H5C__json_write_create_cache_log_msg(void *udata, herr_t fxn_ret_value) { H5C_log_json_udata_t *json_udata = (H5C_log_json_udata_t *)(udata); herr_t ret_value = SUCCEED; /* Return value */ @@ -430,7 +430,7 @@ H5C__json_write_create_cache_log_msg(void *udata, herr_t fxn_ret_value) HDassert(json_udata->message); /* Create the log message string */ - HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, + HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, "\ {\ \"timestamp\":%lld,\ @@ -474,7 +474,7 @@ H5C__json_write_destroy_cache_log_msg(void *udata) HDassert(json_udata->message); /* Create the log message string */ - HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, + HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, "\ {\ \"timestamp\":%lld,\ @@ -517,7 +517,7 @@ H5C__json_write_evict_cache_log_msg(void *udata, herr_t fxn_ret_value) HDassert(json_udata->message); /* Create the log message string */ - HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, + HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, "\ {\ \"timestamp\":%lld,\ @@ -562,7 +562,7 @@ H5C__json_write_expunge_entry_log_msg(void *udata, haddr_t address, HDassert(json_udata->message); /* Create the log message string */ - HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, + HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, "\ {\ \"timestamp\":%lld,\ @@ -609,7 +609,7 @@ H5C__json_write_flush_cache_log_msg(void *udata, herr_t fxn_ret_value) HDassert(json_udata->message); /* Create the log message string */ - HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, + HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, "\ {\ \"timestamp\":%lld,\ @@ -654,7 +654,7 @@ H5C__json_write_insert_entry_log_msg(void *udata, haddr_t address, HDassert(json_udata->message); /* Create the log message string */ - HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, + HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, "\ {\ \"timestamp\":%lld,\ @@ -705,7 +705,7 @@ H5C__json_write_mark_entry_dirty_log_msg(void *udata, const H5C_cache_entry_t *e HDassert(entry); /* Create the log message string */ - HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, + HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, "\ {\ \"timestamp\":%lld,\ @@ -752,7 +752,7 @@ H5C__json_write_mark_entry_clean_log_msg(void *udata, const H5C_cache_entry_t *e HDassert(entry); /* Create the log message string */ - HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, + HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, "\ {\ \"timestamp\":%lld,\ @@ -799,7 +799,7 @@ H5C__json_write_mark_unserialized_entry_log_msg(void *udata, HDassert(entry); /* Create the log message string */ - HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, + HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, "\ {\ \"timestamp\":%lld,\ @@ -846,7 +846,7 @@ H5C__json_write_mark_serialized_entry_log_msg(void *udata, const H5C_cache_entry HDassert(entry); /* Create the log message string */ - HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, + HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, "\ {\ \"timestamp\":%lld,\ @@ -892,7 +892,7 @@ H5C__json_write_move_entry_log_msg(void *udata, haddr_t old_addr, haddr_t new_ad HDassert(json_udata->message); /* Create the log message string */ - HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, + HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, "\ {\ \"timestamp\":%lld,\ @@ -903,7 +903,7 @@ 
H5C__json_write_move_entry_log_msg(void *udata, haddr_t old_addr, haddr_t new_ad \"returned\":%d\ },\n\ " - , (long long)HDtime(NULL), (unsigned long)old_addr, + , (long long)HDtime(NULL), (unsigned long)old_addr, (unsigned long)new_addr, type_id, (int)fxn_ret_value); /* Write the log message to the file */ @@ -942,7 +942,7 @@ H5C__json_write_pin_entry_log_msg(void *udata, const H5C_cache_entry_t *entry, HDassert(entry); /* Create the log message string */ - HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, + HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, "\ {\ \"timestamp\":%lld,\ @@ -951,7 +951,7 @@ H5C__json_write_pin_entry_log_msg(void *udata, const H5C_cache_entry_t *entry, \"returned\":%d\ },\n\ " - , (long long)HDtime(NULL), (unsigned long)entry->addr, + , (long long)HDtime(NULL), (unsigned long)entry->addr, (int)fxn_ret_value); /* Write the log message to the file */ @@ -992,7 +992,7 @@ H5C__json_write_create_fd_log_msg(void *udata, const H5C_cache_entry_t *parent, HDassert(child); /* Create the log message string */ - HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, + HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, "\ {\ \"timestamp\":%lld,\ @@ -1002,7 +1002,7 @@ H5C__json_write_create_fd_log_msg(void *udata, const H5C_cache_entry_t *parent, \"returned\":%d\ },\n\ " - , (long long)HDtime(NULL), (unsigned long)parent->addr, + , (long long)HDtime(NULL), (unsigned long)parent->addr, (unsigned long)child->addr, (int)fxn_ret_value); /* Write the log message to the file */ @@ -1043,11 +1043,11 @@ H5C__json_write_protect_entry_log_msg(void *udata, const H5C_cache_entry_t *entr if(H5C__READ_ONLY_FLAG == flags) HDstrcpy(rw_s, "READ"); - else + else HDstrcpy(rw_s, "WRITE"); /* Create the log message string */ - HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, + HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, "\ {\ \"timestamp\":%lld,\ @@ -1059,7 +1059,7 @@ H5C__json_write_protect_entry_log_msg(void *udata, const H5C_cache_entry_t *entr \"returned\":%d\ },\n\ " - , (long long)HDtime(NULL), (unsigned long)entry->addr, + , (long long)HDtime(NULL), (unsigned long)entry->addr, type_id, rw_s, (int)entry->size, (int)fxn_ret_value); /* Write the log message to the file */ @@ -1098,7 +1098,7 @@ H5C__json_write_resize_entry_log_msg(void *udata, const H5C_cache_entry_t *entry HDassert(entry); /* Create the log message string */ - HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, + HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, "\ {\ \"timestamp\":%lld,\ @@ -1108,7 +1108,7 @@ H5C__json_write_resize_entry_log_msg(void *udata, const H5C_cache_entry_t *entry \"returned\":%d\ },\n\ " - , (long long)HDtime(NULL), (unsigned long)entry->addr, + , (long long)HDtime(NULL), (unsigned long)entry->addr, (int)new_size, (int)fxn_ret_value); /* Write the log message to the file */ @@ -1147,7 +1147,7 @@ H5C__json_write_unpin_entry_log_msg(void *udata, const H5C_cache_entry_t *entry, HDassert(entry); /* Create the log message string */ - HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, + HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, "\ {\ \"timestamp\":%lld,\ @@ -1156,7 +1156,7 @@ H5C__json_write_unpin_entry_log_msg(void *udata, const H5C_cache_entry_t *entry, \"returned\":%d\ },\n\ " - , (long long)HDtime(NULL), (unsigned long)entry->addr, + , (long long)HDtime(NULL), (unsigned long)entry->addr, (int)fxn_ret_value); /* Write the log message to the file */ @@ -1197,7 +1197,7 @@ 
H5C__json_write_destroy_fd_log_msg(void *udata, const H5C_cache_entry_t *parent, HDassert(child); /* Create the log message string */ - HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, + HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, "\ {\ \"timestamp\":%lld,\ @@ -1207,7 +1207,7 @@ H5C__json_write_destroy_fd_log_msg(void *udata, const H5C_cache_entry_t *parent, \"returned\":%d\ },\n\ " - , (long long)HDtime(NULL), (unsigned long)parent->addr, + , (long long)HDtime(NULL), (unsigned long)parent->addr, (unsigned long)child->addr, (int)fxn_ret_value); /* Write the log message to the file */ @@ -1245,7 +1245,7 @@ H5C__json_write_unprotect_entry_log_msg(void *udata, haddr_t address, HDassert(json_udata->message); /* Create the log message string */ - HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, + HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, "\ {\ \"timestamp\":%lld,\ @@ -1256,7 +1256,7 @@ H5C__json_write_unprotect_entry_log_msg(void *udata, haddr_t address, \"returned\":%d\ },\n\ " - , (long long)HDtime(NULL), (unsigned long)address, + , (long long)HDtime(NULL), (unsigned long)address, type_id, flags, (int)fxn_ret_value); /* Write the log message to the file */ @@ -1295,7 +1295,7 @@ H5C__json_write_set_cache_config_log_msg(void *udata, const H5AC_cache_config_t HDassert(config); /* Create the log message string */ - HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, + HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, "\ {\ \"timestamp\":%lld,\ @@ -1342,7 +1342,7 @@ H5C__json_write_remove_entry_log_msg(void *udata, const H5C_cache_entry_t *entry HDassert(entry); /* Create the log message string */ - HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, + HDsnprintf(json_udata->message, H5C_MAX_JSON_LOG_MSG_SIZE, "\ {\ \"timestamp\":%lld,\ @@ -1351,7 +1351,7 @@ H5C__json_write_remove_entry_log_msg(void *udata, const H5C_cache_entry_t *entry \"returned\":%d\ },\n\ " - , (long long)HDtime(NULL), (unsigned long)entry->addr, + , (long long)HDtime(NULL), (unsigned long)entry->addr, (int)fxn_ret_value); /* Write the log message to the file */ diff --git a/src/H5Clog_trace.c b/src/H5Clog_trace.c index 713f333..63fe382 100644 --- a/src/H5Clog_trace.c +++ b/src/H5Clog_trace.c @@ -138,7 +138,7 @@ static H5C_log_class_t H5C_trace_log_class_g = { /*------------------------------------------------------------------------- * Function: H5C__trace_write_log_message * - * Purpose: Write a message to the log file and flush the file. + * Purpose: Write a message to the log file and flush the file. * The message string is neither modified nor freed. 
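
The JSON writer earlier and the trace writer documented here share one emit sequence: format into a fixed-size, reused message buffer, write it out, flush, and zero the buffer for the next call. A minimal standalone version of that sequence, using plain stdio in place of the HD-wrapped calls:

    #include <stdio.h>
    #include <string.h>

    /* Emit one log message and reset the shared buffer, following the
     * pattern of the H5C__json/trace write_log_message routines
     * (simplified: fputs/fflush here instead of the HD-wrapped calls).
     */
    static int emit_log_message(FILE *out, char *message)
    {
        size_t n_chars = strlen(message);

        if (fputs(message, out) == EOF)
            return -1;                    /* error writing log message */
        if (fflush(out) != 0)
            return -1;                    /* the log must hit the file now */

        memset(message, 0, n_chars);      /* ready the buffer for reuse */
        return 0;
    }
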
* * Return: SUCCEED/FAIL @@ -166,7 +166,7 @@ H5C__trace_write_log_message(H5C_log_trace_udata_t *trace_udata) if((int)n_chars != HDfprintf(trace_udata->outfile, trace_udata->message)) HGOTO_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "error writing log message") HDmemset((void *)(trace_udata->message), 0, (size_t)(n_chars * sizeof(char))); - + done: FUNC_LEAVE_NOAPI(ret_value) } /* H5C__trace_write_log_message() */ @@ -217,7 +217,7 @@ H5C_log_trace_set_up(H5C_log_info_t *log_info, const char log_location[], int mp if(NULL == (log_info->udata = H5MM_calloc(sizeof(H5C_log_trace_udata_t)))) HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed") trace_udata = (H5C_log_trace_udata_t *)(log_info->udata); - + /* Allocate memory for the message buffer */ if(NULL == (trace_udata->message = (char *)H5MM_calloc(H5C_MAX_TRACE_LOG_MSG_SIZE * sizeof(char)))) HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed") @@ -340,7 +340,7 @@ H5C__trace_write_expunge_entry_log_msg(void *udata, haddr_t address, HDassert(trace_udata->message); /* Create the log message string */ - HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_expunge_entry 0x%lx %d %d\n", + HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_expunge_entry 0x%lx %d %d\n", (unsigned long)address, type_id, (int)fxn_ret_value); /* Write the log message to the file */ @@ -377,7 +377,7 @@ H5C__trace_write_flush_cache_log_msg(void *udata, herr_t fxn_ret_value) HDassert(trace_udata->message); /* Create the log message string */ - HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_flush %d\n", + HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_flush %d\n", (int)fxn_ret_value); /* Write the log message to the file */ @@ -415,7 +415,7 @@ H5C__trace_write_insert_entry_log_msg(void *udata, haddr_t address, HDassert(trace_udata->message); /* Create the log message string */ - HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_insert_entry 0x%lx %d 0x%x %d %d\n", + HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_insert_entry 0x%lx %d 0x%x %d %d\n", (unsigned long)address, type_id, flags, (int)size, (int)fxn_ret_value); /* Write the log message to the file */ @@ -454,7 +454,7 @@ H5C__trace_write_mark_entry_dirty_log_msg(void *udata, const H5C_cache_entry_t * HDassert(entry); /* Create the log message string */ - HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_mark_entry_dirty 0x%lx %d\n", + HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_mark_entry_dirty 0x%lx %d\n", (unsigned long)(entry->addr), (int)fxn_ret_value); /* Write the log message to the file */ @@ -493,7 +493,7 @@ H5C__trace_write_mark_entry_clean_log_msg(void *udata, const H5C_cache_entry_t * HDassert(entry); /* Create the log message string */ - HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_mark_entry_clean 0x%lx %d\n", + HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_mark_entry_clean 0x%lx %d\n", (unsigned long)(entry->addr), (int)fxn_ret_value); /* Write the log message to the file */ @@ -532,7 +532,7 @@ H5C__trace_write_mark_unserialized_entry_log_msg(void *udata, HDassert(entry); /* Create the log message string */ - HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_mark_entry_unserialized 0x%lx %d\n", + HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_mark_entry_unserialized 0x%lx %d\n", (unsigned long)(entry->addr), (int)fxn_ret_value); /* Write the log 
message to the file */ @@ -571,7 +571,7 @@ H5C__trace_write_mark_serialized_entry_log_msg(void *udata, const H5C_cache_entr HDassert(entry); /* Create the log message string */ - HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_mark_entry_serialized 0x%lx %d\n", + HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_mark_entry_serialized 0x%lx %d\n", (unsigned long)(entry->addr), (int)fxn_ret_value); /* Write the log message to the file */ @@ -609,7 +609,7 @@ H5C__trace_write_move_entry_log_msg(void *udata, haddr_t old_addr, haddr_t new_a HDassert(trace_udata->message); /* Create the log message string */ - HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_move_entry 0x%lx 0x%lx %d %d\n", + HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_move_entry 0x%lx 0x%lx %d %d\n", (unsigned long)old_addr, (unsigned long)new_addr, type_id, (int)fxn_ret_value); /* Write the log message to the file */ @@ -648,7 +648,7 @@ H5C__trace_write_pin_entry_log_msg(void *udata, const H5C_cache_entry_t *entry, HDassert(entry); /* Create the log message string */ - HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_pin_protected_entry 0x%lx %d\n", + HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_pin_protected_entry 0x%lx %d\n", (unsigned long)(entry->addr), (int)fxn_ret_value); /* Write the log message to the file */ @@ -689,7 +689,7 @@ H5C__trace_write_create_fd_log_msg(void *udata, const H5C_cache_entry_t *parent, HDassert(child); /* Create the log message string */ - HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_create_flush_dependency 0x%lx 0x%lx %d\n", + HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_create_flush_dependency 0x%lx 0x%lx %d\n", (unsigned long)(parent->addr), (unsigned long)(child->addr), (int)fxn_ret_value); /* Write the log message to the file */ @@ -728,7 +728,7 @@ H5C__trace_write_protect_entry_log_msg(void *udata, const H5C_cache_entry_t *ent HDassert(entry); /* Create the log message string */ - HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_protect 0x%lx %d 0x%x %d %d\n", + HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_protect 0x%lx %d 0x%x %d %d\n", (unsigned long)(entry->addr), type_id, flags, (int)(entry->size), (int)fxn_ret_value); /* Write the log message to the file */ @@ -767,7 +767,7 @@ H5C__trace_write_resize_entry_log_msg(void *udata, const H5C_cache_entry_t *entr HDassert(entry); /* Create the log message string */ - HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_resize_entry 0x%lx %d %d\n", + HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_resize_entry 0x%lx %d %d\n", (unsigned long)(entry->addr), (int)new_size, (int)fxn_ret_value); /* Write the log message to the file */ @@ -806,7 +806,7 @@ H5C__trace_write_unpin_entry_log_msg(void *udata, const H5C_cache_entry_t *entry HDassert(entry); /* Create the log message string */ - HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_unpin_entry 0x%lx %d\n", + HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_unpin_entry 0x%lx %d\n", (unsigned long)(entry->addr), (int)fxn_ret_value); /* Write the log message to the file */ @@ -847,7 +847,7 @@ H5C__trace_write_destroy_fd_log_msg(void *udata, const H5C_cache_entry_t *parent HDassert(child); /* Create the log message string */ - HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_destroy_flush_dependency 0x%lx 0x%lx 
%d\n", + HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_destroy_flush_dependency 0x%lx 0x%lx %d\n", (unsigned long)(parent->addr), (unsigned long)(child->addr), (int)fxn_ret_value); /* Write the log message to the file */ @@ -885,7 +885,7 @@ H5C__trace_write_unprotect_entry_log_msg(void *udata, haddr_t address, HDassert(trace_udata->message); /* Create the log message string */ - HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_unprotect 0x%lx %d 0x%x %d\n", + HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_unprotect 0x%lx %d 0x%x %d\n", (unsigned long)(address), type_id, flags, (int)fxn_ret_value); /* Write the log message to the file */ @@ -994,7 +994,7 @@ H5C__trace_write_remove_entry_log_msg(void *udata, const H5C_cache_entry_t *entr HDassert(entry); /* Create the log message string */ - HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_remove_entry 0x%lx %d\n", + HDsnprintf(trace_udata->message, H5C_MAX_TRACE_LOG_MSG_SIZE, "H5AC_remove_entry 0x%lx %d\n", (unsigned long)(entry->addr), (int)fxn_ret_value); /* Write the log message to the file */ diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c index 2c176e4..0ac4c4f 100644 --- a/src/H5Cmpio.c +++ b/src/H5Cmpio.c @@ -60,7 +60,7 @@ /* Local Prototypes */ /********************/ static herr_t H5C__collective_write(H5F_t *f); -static herr_t H5C__flush_candidate_entries(H5F_t *f, unsigned entries_to_flush[H5C_RING_NTYPES], +static herr_t H5C__flush_candidate_entries(H5F_t *f, unsigned entries_to_flush[H5C_RING_NTYPES], unsigned entries_to_clear[H5C_RING_NTYPES]); static herr_t H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned entries_to_flush, unsigned entries_to_clear); @@ -87,20 +87,20 @@ static herr_t H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, * * Purpose: Apply the supplied candidate list. * - * We used to do this by simply having each process write - * every mpi_size-th entry in the candidate list, starting - * at index mpi_rank, and mark all the others clean. + * We used to do this by simply having each process write + * every mpi_size-th entry in the candidate list, starting + * at index mpi_rank, and mark all the others clean. * - * However, this can cause unnecessary contention in a file - * system by increasing the number of processes writing to + * However, this can cause unnecessary contention in a file + * system by increasing the number of processes writing to * adjacent locations in the HDF5 file. * - * To attempt to minimize this, we now arange matters such - * that each process writes n adjacent entries in the + * To attempt to minimize this, we now arange matters such + * that each process writes n adjacent entries in the * candidate list, and marks all others clean. We must do - * this in such a fashion as to guarantee that each entry - * on the candidate list is written by exactly one process, - * and marked clean by all others. + * this in such a fashion as to guarantee that each entry + * on the candidate list is written by exactly one process, + * and marked clean by all others. * * To do this, first construct a table mapping mpi_rank * to the index of the first entry in the candidate list to @@ -110,7 +110,7 @@ static herr_t H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, * * Note that the table must be identical on all processes, as * all see the same candidate list, mpi_size, and mpi_rank -- - * the inputs used to construct the table. + * the inputs used to construct the table. 
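
The construction spelled out below divides num_candidates entries among mpi_size ranks, giving the first num_candidates % mpi_size ranks one extra entry each. As a concrete example, 10 candidates across 4 ranks yields n = 2 and m = 2, so the table is {0, 3, 6, 8, 10}: ranks 0 and 1 flush three entries each, ranks 2 and 3 flush two. A hedged standalone sketch of the same construction (simplified names, plain malloc in place of the library's allocator and error macros):

    #include <assert.h>
    #include <stdlib.h>

    /* Build the per-rank assignment table: rank r flushes candidates
     * [table[r], table[r + 1] - 1].  The first m = n_cand % mpi_size
     * ranks absorb one extra entry when the list does not divide evenly.
     */
    static int *build_assignment_table(unsigned n_cand, int mpi_size)
    {
        unsigned n = n_cand / (unsigned)mpi_size;  /* base share per rank  */
        unsigned m = n_cand % (unsigned)mpi_size;  /* ranks with one extra */
        int *table = malloc(((size_t)mpi_size + 1) * sizeof(int));
        int  i;

        if (table == NULL)
            return NULL;

        table[0]        = 0;
        table[mpi_size] = (int)n_cand;

        if (m == 0)                                /* even division */
            for (i = 1; i < mpi_size; i++)
                table[i] = table[i - 1] + (int)n;
        else {                                     /* first m ranks get n + 1 */
            for (i = 1; i <= (int)m; i++)
                table[i] = table[i - 1] + (int)n + 1;
            for (; i < mpi_size; i++)
                table[i] = table[i - 1] + (int)n;
        }

        assert(table[mpi_size] == (int)n_cand);    /* every entry assigned once */
        return table;
    }
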
* * We construct the table as follows. Let: * @@ -118,18 +118,18 @@ static herr_t H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, * * m = num_candidates % mpi_size; * - * Now allocate an array of integers of length mpi_size + 1, - * and call this array candidate_assignment_table. + * Now allocate an array of integers of length mpi_size + 1, + * and call this array candidate_assignment_table. * * Conceptually, if the number of candidates is a multiple * of the mpi_size, we simply pass through the candidate list - * and assign n entries to each process to flush, with the - * index of the first entry to flush in the location in + * and assign n entries to each process to flush, with the + * index of the first entry to flush in the location in * the candidate_assignment_table indicated by the mpi_rank - * of the process. + * of the process. * - * In the more common case in which the candidate list isn't - * isn't a multiple of the mpi_size, we pretend it is, and + * In the more common case in which the candidate list isn't + * isn't a multiple of the mpi_size, we pretend it is, and * give num_candidates % mpi_size processes one extra entry * each to make things work out. * @@ -138,22 +138,22 @@ static herr_t H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, * * first_entry_to_flush = candidate_assignment_table[mpi_rank] * - * last_entry_to_flush = + * last_entry_to_flush = * candidate_assignment_table[mpi_rank + 1] - 1; - * - * With these values determined, we simply scan through the - * candidate list, marking all entries in the range + * + * With these values determined, we simply scan through the + * candidate list, marking all entries in the range * [first_entry_to_flush, last_entry_to_flush] for flush, * and all others to be cleaned. * - * Finally, we scan the LRU from tail to head, flushing + * Finally, we scan the LRU from tail to head, flushing * or marking clean the candidate entries as indicated. * If necessary, we scan the pinned list as well. * - * Note that this function will fail if any protected or + * Note that this function will fail if any protected or * clean entries appear on the candidate list. * - * This function is used in managing sync points, and + * This function is used in managing sync points, and * shouldn't be used elsewhere. * * Return: Success: SUCCEED @@ -246,7 +246,7 @@ H5C_apply_candidate_list(H5F_t * f, for(i = 1; i < mpi_size; i++) candidate_assignment_table[i] = candidate_assignment_table[i - 1] + n; } /* end if */ - else { + else { for(i = 1; i <= m; i++) candidate_assignment_table[i] = candidate_assignment_table[i - 1] + n + 1; @@ -287,7 +287,7 @@ H5C_apply_candidate_list(H5F_t * f, HDsprintf(&(tbl_buf[HDstrlen(tbl_buf)]), "\n"); HDfprintf(stdout, "%s", tbl_buf); - HDfprintf(stdout, "%s:%d: flush entries [%u, %u].\n", + HDfprintf(stdout, "%s:%d: flush entries [%u, %u].\n", FUNC, mpi_rank, first_entry_to_flush, last_entry_to_flush); HDfprintf(stdout, "%s:%d: marking entries.\n", FUNC, mpi_rank); @@ -316,7 +316,7 @@ H5C_apply_candidate_list(H5F_t * f, if(entry_ptr->is_protected) /* For now at least, we can't deal with protected entries. * If we encounter one, scream and die. If it becomes an - * issue, we should be able to work around this. + * issue, we should be able to work around this. 
*/ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry is protected?!?!?") @@ -328,9 +328,9 @@ H5C_apply_candidate_list(H5F_t * f, HDassert(!entry_ptr->clear_on_unprotect); /* Determine whether the entry is to be cleared or flushed, - * and mark it accordingly. We will scan the protected and + * and mark it accordingly. We will scan the protected and * pinned list shortly, and clear or flush according to these - * markings. + * markings. */ if(u >= first_entry_to_flush && u <= last_entry_to_flush) { total_entries_to_flush++; @@ -347,7 +347,7 @@ H5C_apply_candidate_list(H5F_t * f, * candidate list to clear from the cache have to be * removed from the coll list. This is OK since the * candidate list is collective and uniform across all - * ranks. + * ranks. */ if(entry_ptr->coll_access) { entry_ptr->coll_access = FALSE; @@ -368,14 +368,14 @@ H5C_apply_candidate_list(H5F_t * f, #endif /* H5C_DO_SANITY_CHECKS */ #if H5C_APPLY_CANDIDATE_LIST__DEBUG - HDfprintf(stdout, "%s:%d: num candidates/to clear/to flush = %u/%u/%u.\n", + HDfprintf(stdout, "%s:%d: num candidates/to clear/to flush = %u/%u/%u.\n", FUNC, mpi_rank, num_candidates, total_entries_to_clear, total_entries_to_flush); #endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */ - /* We have now marked all the entries on the candidate list for + /* We have now marked all the entries on the candidate list for * either flush or clear -- now scan the LRU and the pinned list - * for these entries and do the deed. Do this via a call to + * for these entries and do the deed. Do this via a call to * H5C__flush_candidate_entries(). * * Note that we are doing things in this round about manner so as @@ -412,10 +412,10 @@ done: /*------------------------------------------------------------------------- * Function: H5C_construct_candidate_list__clean_cache * - * Purpose: Construct the list of entries that should be flushed to + * Purpose: Construct the list of entries that should be flushed to * clean all entries in the cache. * - * This function is used in managing sync points, and + * This function is used in managing sync points, and * shouldn't be used elsewhere. * * Return: Success: SUCCEED @@ -448,9 +448,9 @@ H5C_construct_candidate_list__clean_cache(H5C_t * cache_ptr) * point, it is possible that some dirty entries may reside on the * pinned list at this point. */ - HDassert( cache_ptr->slist_size <= + HDassert( cache_ptr->slist_size <= (cache_ptr->dLRU_list_size + cache_ptr->pel_size) ); - HDassert( cache_ptr->slist_len <= + HDassert( cache_ptr->slist_len <= (cache_ptr->dLRU_list_len + cache_ptr->pel_len) ); if(space_needed > 0) { /* we have work to do */ @@ -462,7 +462,7 @@ H5C_construct_candidate_list__clean_cache(H5C_t * cache_ptr) HDassert( cache_ptr->slist_len > 0 ); /* Scan the dirty LRU list from tail forward and nominate sufficient - * entries to free up the necessary space. + * entries to free up the necessary space. 
*/ entry_ptr = cache_ptr->dLRU_tail_ptr; while((nominated_entries_size < space_needed) && @@ -484,7 +484,7 @@ H5C_construct_candidate_list__clean_cache(H5C_t * cache_ptr) } /* end while */ HDassert( entry_ptr == NULL ); - /* it is possible that there are some dirty entries on the + /* it is possible that there are some dirty entries on the * protected entry list as well -- scan it too if necessary */ entry_ptr = cache_ptr->pel_head_ptr; @@ -521,10 +521,10 @@ done: /*------------------------------------------------------------------------- * Function: H5C_construct_candidate_list__min_clean * - * Purpose: Construct the list of entries that should be flushed to + * Purpose: Construct the list of entries that should be flushed to * get the cache back within its min clean constraints. * - * This function is used in managing sync points, and + * This function is used in managing sync points, and * shouldn't be used elsewhere. * * Return: Success: SUCCEED @@ -547,7 +547,7 @@ H5C_construct_candidate_list__min_clean(H5C_t * cache_ptr) HDassert( cache_ptr != NULL ); HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC ); - /* compute the number of bytes (if any) that must be flushed to get the + /* compute the number of bytes (if any) that must be flushed to get the * cache back within its min clean constraints. */ if(cache_ptr->max_cache_size > cache_ptr->index_size) { @@ -575,7 +575,7 @@ H5C_construct_candidate_list__min_clean(H5C_t * cache_ptr) HDassert( cache_ptr->slist_len > 0 ); /* Scan the dirty LRU list from tail forward and nominate sufficient - * entries to free up the necessary space. + * entries to free up the necessary space. */ entry_ptr = cache_ptr->dLRU_tail_ptr; while((nominated_entries_size < space_needed) && @@ -751,23 +751,23 @@ H5C_mark_entries_as_clean(H5F_t * f, * any protected entries will not be on the LRU, and therefore * will not be flushed at this time. * - * Note that unlike H5C_apply_candidate_list(), - * H5C_mark_entries_as_clean() makes all its calls to - * H5C__flush_single_entry() with the H5C__FLUSH_CLEAR_ONLY_FLAG - * set. As a result, the pre_serialize() and serialize calls are + * Note that unlike H5C_apply_candidate_list(), + * H5C_mark_entries_as_clean() makes all its calls to + * H5C__flush_single_entry() with the H5C__FLUSH_CLEAR_ONLY_FLAG + * set. As a result, the pre_serialize() and serialize calls are * not made. * - * This then implies that (assuming such actions were - * permitted in the parallel case) no loads, dirties, - * resizes, or removals of other entries can occur as + * This then implies that (assuming such actions were + * permitted in the parallel case) no loads, dirties, + * resizes, or removals of other entries can occur as * a side effect of the flush. Hence, there is no need - * for the checks for entry removal / status change + * for the checks for entry removal / status change * that I ported to H5C_apply_candidate_list(). * * However, if (in addition to allowing such operations * in the parallel case), we allow such operations outside - * of the pre_serialize / serialize routines, this may - * cease to be the case -- requiring a review of this + * of the pre_serialize / serialize routines, this may + * cease to be the case -- requiring a review of this * point. 
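
The distinction drawn in the comment above is worth pinning down: a clear-only flush marks the entry clean without running the pre-serialize or serialize callbacks, which is why no loads, dirties, resizes, or removals of other entries can occur as a side effect. A condensed sketch of that control flow, with a hypothetical flag value and callback (the real dispatch lives in H5C__flush_single_entry()):

    #include <stdbool.h>

    #define FLUSH_CLEAR_ONLY 0x01u   /* stand-in for H5C__FLUSH_CLEAR_ONLY_FLAG */

    /* Skeleton of the flush/clear split: only a real flush serializes and
     * writes, so only a real flush can trigger client callbacks with side
     * effects on the rest of the cache.
     */
    static void flush_single_entry(unsigned flags, bool *is_dirty,
                                   void (*serialize_and_write)(void))
    {
        if (!(flags & FLUSH_CLEAR_ONLY))
            serialize_and_write();   /* pre_serialize + serialize + write */

        *is_dirty = false;           /* either way, the entry ends up clean */
    }
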
* JRM -- 4/7/15 */ @@ -813,7 +813,7 @@ H5C_mark_entries_as_clean(H5F_t * f, pinned_entries_cleared++; progress = TRUE; - if(H5C__flush_single_entry(f, clear_ptr, + if(H5C__flush_single_entry(f, clear_ptr, (H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG | H5C__UPDATE_PAGE_BUFFER_FLAG)) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear entry") } /* end if */ @@ -860,7 +860,7 @@ done: * * Function: H5C_clear_coll_entries * - * Purpose: Clear half or the entire list of collective entries and + * Purpose: Clear half or the entire list of collective entries and * mark them as independent. * * Return: FAIL if error is detected, SUCCEED otherwise. @@ -872,7 +872,7 @@ done: */ herr_t H5C_clear_coll_entries(H5C_t *cache_ptr, hbool_t partial) -{ +{ uint32_t clear_cnt; H5C_cache_entry_t * entry_ptr = NULL; herr_t ret_value = SUCCEED; @@ -1042,8 +1042,8 @@ H5C__collective_write(H5F_t *f) info = *info_p; - /* just to match up with the 1st MPI_File_set_view from - * H5FD_mpio_write() + /* just to match up with the 1st MPI_File_set_view from + * H5FD_mpio_write() */ if(MPI_SUCCESS != (mpi_code = MPI_File_set_view(mpi_fh, (MPI_Offset)0, MPI_BYTE, MPI_BYTE, "native", info))) HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code) @@ -1053,8 +1053,8 @@ H5C__collective_write(H5F_t *f) if(MPI_SUCCESS != (mpi_code = MPI_File_write_at_all(mpi_fh, (MPI_Offset)0, NULL, 0, MPI_BYTE, &mpi_stat))) HMPI_GOTO_ERROR(FAIL, "MPI_File_write_at_all failed", mpi_code) - /* just to match up with the 2nd MPI_File_set_view (reset) in - * H5FD_mpio_write() + /* just to match up with the 2nd MPI_File_set_view (reset) in + * H5FD_mpio_write() */ if(MPI_SUCCESS != (mpi_code = MPI_File_set_view(mpi_fh, (MPI_Offset)0, MPI_BYTE, MPI_BYTE, "native", info))) HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code) diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index 6608bc2..8712af5 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -149,8 +149,8 @@ * * JRM - 9/8/05 * - * - Added macros supporting the index list -- a doubly liked list of - * all entries in the index. This list is necessary to reduce the + * - Added macros supporting the index list -- a doubly liked list of + * all entries in the index. This list is necessary to reduce the * cost of visiting all entries in the cache, which was previously * done via a scan of the hash table. * @@ -963,16 +963,16 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ * * JRM -- 11/5/08 * - * - Updated existing index macros and sanity check macros to maintain + * - Updated existing index macros and sanity check macros to maintain * the index_ring_len, index_ring_size, clean_index_ring_size, and * dirty_index_ring_size fields of H5C_t. * * JRM -- 9/1/15 * - * - Updated existing index macros and sanity checks macros to + * - Updated existing index macros and sanity checks macros to * maintain an doubly linked list of all entries in the index. * This is necessary to reduce the computational cost of visiting - * all entries in the index, which used to be done by scanning + * all entries in the index, which used to be done by scanning * the hash table. * * JRM -- 10/15/15 @@ -1561,7 +1561,7 @@ if ( ( (cache_ptr)->index_size != \ * flush. * * JRM -- 12/13/14 - * Added code to set cache_ptr->slist_changed to TRUE + * Added code to set cache_ptr->slist_changed to TRUE * when an entry is inserted in the slist. * * JRM -- 9/1/15 @@ -1762,7 +1762,7 @@ if ( ( (cache_ptr)->index_size != \ * flush. 
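
The index list introduced in the macro comments above exists purely to make "visit every entry" cost proportional to the number of entries rather than to the hash table's bucket count. The payoff is easiest to see in a toy traversal (hypothetical node type; the real list is maintained by the il_next/il_prev linkage described further on):

    #include <stddef.h>

    struct idx_entry {               /* hypothetical index-list node */
        struct idx_entry *il_next;
    };

    /* Walk every cached entry via the index list: linear in the number of
     * entries, independent of how many hash buckets would otherwise have
     * to be swept.
     */
    static unsigned count_index_entries(const struct idx_entry *il_head)
    {
        unsigned n = 0;

        for (const struct idx_entry *e = il_head; e != NULL; e = e->il_next)
            n++;
        return n;
    }
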
* * JRM -- 12/13/14 - * Note that we do not set cache_ptr->slist_changed to TRUE + * Note that we do not set cache_ptr->slist_changed to TRUE * in this case, as the structure of the slist is not * modified. * @@ -2243,14 +2243,14 @@ if ( ( (cache_ptr)->index_size != \ * Macro: H5C__UPDATE_RP_FOR_INSERT_APPEND * * Purpose: Update the replacement policy data structures for an - * insertion of the specified cache entry. + * insertion of the specified cache entry. * - * Unlike H5C__UPDATE_RP_FOR_INSERTION below, mark the - * new entry as the LEAST recently used entry, not the - * most recently used. + * Unlike H5C__UPDATE_RP_FOR_INSERTION below, mark the + * new entry as the LEAST recently used entry, not the + * most recently used. * - * For now at least, this macro should only be used in - * the reconstruction of the metadata cache from a cache + * For now at least, this macro should only be used in + * the reconstruction of the metadata cache from a cache * image block. * * At present, we only support the modified LRU policy, so @@ -3491,7 +3491,7 @@ typedef struct H5C_tag_info_t { * * JRM - 9/26/05 * - * magic: Unsigned 32 bit integer always set to H5C__H5C_T_MAGIC. + * magic: Unsigned 32 bit integer always set to H5C__H5C_T_MAGIC. * This field is used to validate pointers to instances of * H5C_t. * @@ -3544,7 +3544,7 @@ typedef struct H5C_tag_info_t { * clean data so as to avoid case b) above. Again, this is * a soft limit. * - * close_warning_received: Boolean flag indicating that a file closing + * close_warning_received: Boolean flag indicating that a file closing * warning has been received. * * @@ -3576,7 +3576,7 @@ typedef struct H5C_tag_info_t { * this flag is set to FALSE, the metadata cache will not * attempt to evict entries to make space for newly protected * entries, and instead the will grow without limit. - * + * * Needless to say, this feature must be used with care. * * @@ -3586,12 +3586,12 @@ typedef struct H5C_tag_info_t { * Addendum: JRM -- 10/14/15 * * We sometimes need to visit all entries in the cache. In the past, this - * was done by scanning the hash table. However, this is expensive, and - * we have come to scan the hash table often enough that it has become a - * performance issue. To repair this, I have added code to maintain a - * list of all entries in the index -- call this list the index list. + * was done by scanning the hash table. However, this is expensive, and + * we have come to scan the hash table often enough that it has become a + * performance issue. To repair this, I have added code to maintain a + * list of all entries in the index -- call this list the index list. * - * The index list is maintained by the same macros that maintain the + * The index list is maintained by the same macros that maintain the * index, and must have the same length and size as the index proper. * * index_len: Number of entries currently in the hash table used to index @@ -3606,12 +3606,12 @@ typedef struct H5C_tag_info_t { * index_size by three should yield a conservative estimate * of the cache's memory footprint. * - * index_ring_len: Array of integer of length H5C_RING_NTYPES used to - * maintain a count of entries in the index by ring. Note - * that the sum of all the cells in this array must equal + * index_ring_len: Array of integer of length H5C_RING_NTYPES used to + * maintain a count of entries in the index by ring. Note + * that the sum of all the cells in this array must equal * the value stored in index_len above. 
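
The per-ring bookkeeping documented here obeys simple sum invariants: the ring-indexed counts and sizes must add up to index_len and index_size, and the clean and dirty totals partition index_size. A sketch of the corresponding sanity check (simplified names; RING_NTYPES is a stand-in for H5C_RING_NTYPES, and the final assert assumes the usual clean/dirty decomposition):

    #include <assert.h>
    #include <stddef.h>

    #define RING_NTYPES 6            /* stand-in for H5C_RING_NTYPES */

    /* Check the documented sum invariants over the ring-indexed arrays.
     * Illustrative only: the real checks live in the cache's sanity-check
     * macros.
     */
    static void check_ring_invariants(const unsigned ring_len[RING_NTYPES],
                                      const size_t ring_size[RING_NTYPES],
                                      unsigned index_len, size_t index_size,
                                      size_t clean_size, size_t dirty_size)
    {
        unsigned len_sum  = 0;
        size_t   size_sum = 0;
        int      r;

        for (r = 0; r < RING_NTYPES; r++) {
            len_sum  += ring_len[r];
            size_sum += ring_size[r];
        }
        assert(len_sum == index_len);        /* per-ring counts sum up */
        assert(size_sum == index_size);      /* per-ring sizes sum up  */
        assert(clean_size + dirty_size == index_size);
    }
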
* - * index_ring_size: Array of size_t of length H5C_RING_NTYPES used to + * index_ring_size: Array of size_t of length H5C_RING_NTYPES used to * maintain the sum of the sizes of all entries in the index * by ring. Note that the sum of all cells in this array must * equal the value stored in index_size above. @@ -3631,8 +3631,8 @@ typedef struct H5C_tag_info_t { * in the cache. * * clean_index_ring_size: Array of size_t of length H5C_RING_NTYPES used to - * maintain the sum of the sizes of all clean entries in the - * index by ring. Note that the sum of all cells in this array + * maintain the sum of the sizes of all clean entries in the + * index by ring. Note that the sum of all cells in this array * must equal the value stored in clean_index_size above. * * dirty_index_size: Number of bytes of dirty entries currently stored in @@ -3642,8 +3642,8 @@ typedef struct H5C_tag_info_t { * dirty_index_size == index_size. * * dirty_index_ring_size: Array of size_t of length H5C_RING_NTYPES used to - * maintain the sum of the sizes of all dirty entries in the - * index by ring. Note that the sum of all cells in this array + * maintain the sum of the sizes of all dirty entries in the + * index by ring. Note that the sum of all cells in this array * must equal the value stored in dirty_index_size above. * * index: Array of pointer to H5C_cache_entry_t of size @@ -3661,52 +3661,52 @@ typedef struct H5C_tag_info_t { * changing the H5C__HASH_FCN macro and the deletion of the * H5C__HASH_MASK #define. No other changes should be required. * - * il_len: Number of entries on the index list. + * il_len: Number of entries on the index list. * - * This must always be equal to index_len. As such, this - * field is redundant. However, the existing linked list - * management macros expect to maintain a length field, so + * This must always be equal to index_len. As such, this + * field is redundant. However, the existing linked list + * management macros expect to maintain a length field, so * this field exists primarily to avoid adding complexity to * these macros. * * il_size: Number of bytes of cache entries currently stored in the * index list. * - * This must always be equal to index_size. As such, this - * field is redundant. However, the existing linked list - * management macros expect to maintain a size field, so + * This must always be equal to index_size. As such, this + * field is redundant. However, the existing linked list + * management macros expect to maintain a size field, so * this field exists primarily to avoid adding complexity to * these macros. * * il_head: Pointer to the head of the doubly linked list of entries in - * the index list. Note that cache entries on this list are + * the index list. Note that cache entries on this list are * linked by their il_next and il_prev fields. * * This field is NULL if the index is empty. * * il_tail: Pointer to the tail of the doubly linked list of entries in - * the index list. Note that cache entries on this list are + * the index list. Note that cache entries on this list are * linked by their il_next and il_prev fields. * * This field is NULL if the index is empty. * * - * With the addition of the take ownership flag, it is possible that - * an entry may be removed from the cache as the result of the flush of - * a second entry. In general, this causes little trouble, but it is - * possible that the entry removed may be the next entry in the scan of - * a list. 
In this case, we must be able to detect the fact that the + * With the addition of the take ownership flag, it is possible that + * an entry may be removed from the cache as the result of the flush of + * a second entry. In general, this causes little trouble, but it is + * possible that the entry removed may be the next entry in the scan of + * a list. In this case, we must be able to detect the fact that the * entry has been removed, so that the scan doesn't attempt to proceed with * an entry that is no longer in the cache. * * The following fields are maintained to facilitate this. * * entries_removed_counter: Counter that is incremented each time an - * entry is removed from the cache by any means (eviction, + * entry is removed from the cache by any means (eviction, * expungement, or take ownership at this point in time). * Functions that perform scans on lists may set this field - * to zero prior to calling H5C__flush_single_entry(). - * Unexpected changes to the counter indicate that an entry + * to zero prior to calling H5C__flush_single_entry(). + * Unexpected changes to the counter indicate that an entry * was removed from the cache as a side effect of the flush. * * last_entry_removed_ptr: Pointer to the instance of H5C_cache_entry_t @@ -3715,11 +3715,11 @@ typedef struct H5C_tag_info_t { * performing a scan of a list has set this field to NULL prior * to calling H5C__flush_single_entry(). * - * WARNING!!! This field must NEVER be dereferenced. It is + * WARNING!!! This field must NEVER be dereferenced. It is * maintained to allow functions that perform scans of lists * to compare this pointer with their pointers to next, thus * allowing them to avoid unnecessary restarts of scans if the - * pointers don't match, and if entries_removed_counter is + * pointers don't match, and if entries_removed_counter is * one. * * entry_watched_for_removal: Pointer to an instance of H5C_cache_entry_t @@ -3740,11 +3740,11 @@ typedef struct H5C_tag_info_t { * are flushed. (this has been changed -- dirty entries are now removed from * the skip list as they are flushed. JRM - 10/25/05) * - * slist_changed: Boolean flag used to indicate whether the contents of + * slist_changed: Boolean flag used to indicate whether the contents of * the slist has changed since the last time this flag was - * reset. This is used in the cache flush code to detect + * reset. This is used in the cache flush code to detect * conditions in which pre-serialize or serialize callbacks - * have modified the slist -- which obliges us to restart + * have modified the slist -- which obliges us to restart * the scan of the slist from the beginning. * * slist_len: Number of entries currently in the skip list @@ -3755,14 +3755,14 @@ typedef struct H5C_tag_info_t { * skip list used to maintain a sorted list of * dirty entries in the cache. * - * slist_ring_len: Array of integer of length H5C_RING_NTYPES used to - * maintain a count of entries in the slist by ring. Note - * that the sum of all the cells in this array must equal + * slist_ring_len: Array of integer of length H5C_RING_NTYPES used to + * maintain a count of entries in the slist by ring. Note + * that the sum of all the cells in this array must equal * the value stored in slist_len above. * * slist_ring_size: Array of size_t of length H5C_RING_NTYPES used to - * maintain the sum of the sizes of all entries in the - * slist by ring. Note that the sum of all cells in this + * maintain the sum of the sizes of all entries in the + * slist by ring. 
Note that the sum of all cells in this * array must equal the value stored in slist_size above. * * slist_ptr: pointer to the instance of H5SL_t used maintain a sorted @@ -3826,7 +3826,7 @@ typedef struct H5C_tag_info_t { * num_objs_corked: Unsigned integer field containing the number of objects * that are "corked". The "corked" status of an object is * found by searching the "tag_list". This field is added - * for optimization so that the skip list search on "tag_list" + * for optimization so that the skip list search on "tag_list" * can be skipped if this field is zero, i.e. no "corked" * objects. * @@ -3867,7 +3867,7 @@ typedef struct H5C_tag_info_t { * * 2) A pinned entry can be accessed or modified at any time. * This places an additional burden on the associated pre-serialize - * and serialize callbacks, which must ensure the the entry is in + * and serialize callbacks, which must ensure the the entry is in * a consistent state before creating an image of it. * * 3) A pinned entry can be marked as dirty (and possibly @@ -3878,7 +3878,7 @@ typedef struct H5C_tag_info_t { * flush. * * Since pinned entries cannot be evicted, they must be kept on a pinned - * entry list (pel), instead of being entrusted to the replacement policy + * entry list (pel), instead of being entrusted to the replacement policy * code. * * Maintaining the pinned entry list requires the following fields: @@ -3907,7 +3907,7 @@ typedef struct H5C_tag_info_t { * * While there has been interest in several replacement policies for * this cache, the initial development schedule is tight. Thus I have - * elected to support only a modified LRU (least recently used) policy + * elected to support only a modified LRU (least recently used) policy * for the first cut. * * To further simplify matters, I have simply included the fields needed @@ -3926,7 +3926,7 @@ typedef struct H5C_tag_info_t { * be collective and the other processes will not know to participate. * * To deal with this issue, I have modified the usual LRU policy by adding - * clean and dirty LRU lists to the usual LRU list. In general, these + * clean and dirty LRU lists to the usual LRU list. In general, these * lists are only exist in parallel builds. * * The clean LRU list is simply the regular LRU list with all dirty cache @@ -3954,13 +3954,13 @@ typedef struct H5C_tag_info_t { * * LRU_list_len: Number of cache entries currently on the LRU list. * - * Observe that LRU_list_len + pl_len + pel_len must always + * Observe that LRU_list_len + pl_len + pel_len must always * equal index_len. * * LRU_list_size: Number of bytes of cache entries currently residing on the * LRU list. * - * Observe that LRU_list_size + pl_size + pel_size must always + * Observe that LRU_list_size + pl_size + pel_size must always * equal index_size. * * LRU_head_ptr: Pointer to the head of the doubly linked LRU list. Cache @@ -4071,25 +4071,25 @@ typedef struct H5C_tag_info_t { * size is decreased. The flag triggers a call to * H5C__make_space_in_cache() on the next call to H5C_protect(). * - * resize_in_progress: As the metadata cache has become re-entrant, it is - * possible that a protect may trigger a call to + * resize_in_progress: As the metadata cache has become re-entrant, it is + * possible that a protect may trigger a call to * H5C__auto_adjust_cache_size(), which may trigger a flush, - * which may trigger a protect, which will result in another - * call to H5C__auto_adjust_cache_size(). 
+ * which may trigger a protect, which will result in another + * call to H5C__auto_adjust_cache_size(). * * The resize_in_progress boolean flag is used to detect this, * and to prevent the infinite recursion that would otherwise * occur. * - * Note that this issue is not hypothetical -- this field - * was added 12/29/15 to fix a bug exposed in the testing + * Note that this issue is not hypothetical -- this field + * was added 12/29/15 to fix a bug exposed in the testing * of changes to the file driver info superblock extension * management code needed to support rings. * * msic_in_progress: As the metadata cache has become re-entrant, and as - * the free space manager code has become more tightly - * integrated with the metadata cache, it is possible that - * a call to H5C_insert_entry() may trigger a call to + * the free space manager code has become more tightly + * integrated with the metadata cache, it is possible that + * a call to H5C_insert_entry() may trigger a call to * H5C_make_space_in_cache(), which, via H5C__flush_single_entry() * and client callbacks, may trigger an infinite regression * of calls to H5C_make_space_in_cache(). @@ -4098,9 +4098,9 @@ typedef struct H5C_tag_info_t { * and prevent the infinite regression that would otherwise * occur. * - * Note that this is issue is not hypothetical -- this field - * was added 2/16/17 to address this issue when it was - * exposed by modifications to test/fheap.c to cause it to + * Note that this is issue is not hypothetical -- this field + * was added 2/16/17 to address this issue when it was + * exposed by modifications to test/fheap.c to cause it to * use paged allocation. * * resize_ctl: Instance of H5C_auto_size_ctl_t containing configuration @@ -4184,45 +4184,45 @@ typedef struct H5C_tag_info_t { * data for generation of a cache image on file close. * * serialization_in_progress: Boolean field that is set to TRUE iff - * the cache is in the process of being serialized. This + * the cache is in the process of being serialized. This * field is needed to support the H5C_serialization_in_progress() * call, which is in turn required for sanity checks in some * cache clients. * - * load_image: Boolean flag indicating that the metadata cache image - * superblock extension message exists and should be + * load_image: Boolean flag indicating that the metadata cache image + * superblock extension message exists and should be * read, and the image block read and decoded on the next - * call to H5C_protect(). + * call to H5C_protect(). * - * image_loaded: Boolean flag indicating that the metadata cache has - * loaded the metadata cache image as directed by the + * image_loaded: Boolean flag indicating that the metadata cache has + * loaded the metadata cache image as directed by the * MDC cache image superblock extension message. * * delete_image: Boolean flag indicating whether the metadata cache image * superblock message should be deleted and the cache image * file space freed after they have been read and decoded. * - * This flag should be set to TRUE iff the file is opened + * This flag should be set to TRUE iff the file is opened * R/W and there is a cache image to be read. * - * image_addr: haddr_t containing the base address of the on disk - * metadata cache image, or HADDR_UNDEF if that value is - * undefined. 
Note that this field is used both in the - * construction and write, and the read and decode of + * image_addr: haddr_t containing the base address of the on disk + * metadata cache image, or HADDR_UNDEF if that value is + * undefined. Note that this field is used both in the + * construction and write, and the read and decode of * metadata cache image blocks. * - * image_len: hsize_t containing the size of the on disk metadata cache - * image, or zero if that value is undefined. Note that this - * field is used both in the construction and write, and the + * image_len: hsize_t containing the size of the on disk metadata cache + * image, or zero if that value is undefined. Note that this + * field is used both in the construction and write, and the * read and decode of metadata cache image blocks. * - * image_data_len: size_t containing the number of bytes of data in the - * on disk metadata cache image, or zero if that value is + * image_data_len: size_t containing the number of bytes of data in the + * on disk metadata cache image, or zero if that value is * undefined. * * In most cases, this value is the same as the image_len * above. It exists to allow for metadata cache image blocks - * that are larger than the actual image. Thus in all + * that are larger than the actual image. Thus in all * cases image_data_len <= image_len. * * To create the metadata cache image, we must first serialize all the @@ -4232,27 +4232,27 @@ typedef struct H5C_tag_info_t { * height in increasing order. * * This operation is complicated by the fact that entries other the the - * target may be inserted, loaded, relocated, or removed from the cache - * (either by eviction or the take ownership flag) as the result of a - * pre_serialize or serialize callback. While entry removals are not + * target may be inserted, loaded, relocated, or removed from the cache + * (either by eviction or the take ownership flag) as the result of a + * pre_serialize or serialize callback. While entry removals are not * a problem for the scan of the index, insertions, loads, and relocations - * are. Hence the entries loaded, inserted, and relocated counters - * listed below have been implemented to allow these conditions to be + * are. Hence the entries loaded, inserted, and relocated counters + * listed below have been implemented to allow these conditions to be * detected and dealt with by restarting the scan. * - * The serialization operation is further complicated by the fact that - * the flush dependency height of a given entry may increase (as the - * result of an entry load or insert) or decrease (as the result of an + * The serialization operation is further complicated by the fact that + * the flush dependency height of a given entry may increase (as the + * result of an entry load or insert) or decrease (as the result of an * entry removal -- via either eviction or the take ownership flag). The * entry_fd_height_change_counter field is maintained to allow detection * of this condition, and a restart of the scan when it occurs. * * Note that all these new fields would work just as well as booleans. * - * entries_loaded_counter: Number of entries loaded into the cache + * entries_loaded_counter: Number of entries loaded into the cache * since the last time this field was reset. * - * entries_inserted_counter: Number of entries inserted into the cache + * entries_inserted_counter: Number of entries inserted into the cache * since the last time this field was reset. 
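
The restart machinery described above reduces to: snapshot the counters, make one serialization pass, and repeat whenever a callback loaded, inserted, or relocated an entry (or changed a flush dependency height) behind the scan's back. A condensed sketch of that loop, with a hypothetical counter struct and serialize_pass() standing in for the real per-ring scan:

    #include <stdbool.h>
    #include <stdint.h>

    struct scan_counters {           /* hypothetical view of the counters */
        int64_t loaded;
        int64_t inserted;
        int64_t relocated;
        int64_t fd_height_changed;
    };

    /* Repeat the serialization pass until it completes without any of the
     * counters moving, i.e. until no callback invalidated the scan.
     */
    static void serialize_until_stable(struct scan_counters *c,
                                       void (*serialize_pass)(void))
    {
        struct scan_counters before;
        bool dirty_pass;

        do {
            before = *c;             /* snapshot before the pass        */
            serialize_pass();        /* callbacks may bump the counters */
            dirty_pass = before.loaded            != c->loaded    ||
                         before.inserted          != c->inserted  ||
                         before.relocated         != c->relocated ||
                         before.fd_height_changed != c->fd_height_changed;
        } while (dirty_pass);
    }
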
 *
 * entries_relocated_counter: Number of entries whose base address has
@@ -4261,32 +4261,32 @@ typedef struct H5C_tag_info_t {
 * entry_fd_height_change_counter: Number of entries whose flush dependency
 * height has changed since the last time this field was reset.
 *
- * The following fields are used to assemble the cache image prior to
+ * The following fields are used to assemble the cache image prior to
 * writing it to disk.
 *
 * num_entries_in_image: Unsigned integer field containing the number of entries
- * to be copied into the metadata cache image. Note that
- * this value will be less than the number of entries in
- * the cache, and the superblock and its related entries
+ * to be copied into the metadata cache image. Note that
+ * this value will be less than the number of entries in
+ * the cache, and the superblock and its related entries
 * are not written to the metadata cache image.
 *
 * image_entries: Pointer to a dynamically allocated array of instances of
 * H5C_image_entry_t of length num_entries_in_image, or NULL
 * if that array does not exist. This array is used to
- * assemble entry data to be included in the image, and to
+ * assemble entry data to be included in the image, and to
 * sort them by flush dependency height and LRU rank.
- *
+ *
 * image_buffer: Pointer to the dynamically allocated buffer of length
- * image_len in which the metadata cache image is assembled,
+ * image_len in which the metadata cache image is assembled,
 * or NULL if that buffer does not exist.
 *
 *
 * Free Space Manager Related fields:
 *
- * The free space managers must be informed when we are about to close
+ * The free space managers must be informed when we are about to close
 * or flush the file so that they order themselves accordingly. This used
- * to be done much later in the close process, but with cache image and
- * page buffering, this is no longer viable, as we must finalize the on
+ * to be done much later in the close process, but with cache image and
+ * page buffering, this is no longer viable, as we must finalize the on
 * disk image of all metadata much sooner.
 *
 * This is handled by the H5MF_settle_raw_data_fsm() and
@@ -4300,11 +4300,11 @@ typedef struct H5C_tag_info_t {
 * flush is complete.
 *
 * rdfsm_settled: Boolean flag indicating whether the raw data free space
- * manager is settled -- i.e. whether the correct space has
+ * manager is settled -- i.e. whether the correct space has
 * been allocated for it in the file.
 *
- * Note that the name of this field is deceptive. In the
- * multi file case, the flag applies to all free space
+ * Note that the name of this field is deceptive. In the
+ * multi file case, the flag applies to all free space
 * managers that are not involved in allocating space for
 * free space manager metadata.
 *
@@ -4312,9 +4312,9 @@ typedef struct H5C_tag_info_t {
 * manager is settled -- i.e. whether the correct space has
 * been allocated for it in the file.
 *
- * Note that the name of this field is deceptive. In the
- * multi file case, the flag applies only to free space
- * managers that are involved in allocating space for free
+ * Note that the name of this field is deceptive. In the
+ * multi file case, the flag applies only to free space
+ * managers that are involved in allocating space for free
 * space managers.
 *
 *
@@ -4358,7 +4358,7 @@ typedef struct H5C_tag_info_t {
 * id equal to the array index has been inserted into the
 * cache in the current epoch.
 *
- * pinned_insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * pinned_insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. * The cells are used to record the number of times an entry * with type id equal to the array index has been inserted * pinned into the cache in the current epoch. @@ -4378,9 +4378,9 @@ typedef struct H5C_tag_info_t { * equal to the array index has been evicted from the cache in * the current epoch. * - * take_ownerships: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The - * cells are used to record the number of times an entry with - * type id equal to the array index has been removed from the + * take_ownerships: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The + * cells are used to record the number of times an entry with + * type id equal to the array index has been removed from the * cache via the H5C__TAKE_OWNERSHIP_FLAG in the current epoch. * * moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells @@ -4388,12 +4388,12 @@ typedef struct H5C_tag_info_t { * id equal to the array index has been moved in the current * epoch. * - * entry_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. + * entry_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. * The cells are used to record the number of times an entry * with type id equal to the array index has been moved * during its pre-serialize callback in the current epoch. * - * cache_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. + * cache_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. * The cells are used to record the number of times an entry * with type id equal to the array index has been moved * during a cache flush in the current epoch. @@ -4436,7 +4436,7 @@ typedef struct H5C_tag_info_t { * entry_flush_size_changes: Array of int64 of length * H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record * the number of times an entry with type id equal to the - * array index has changed size while in its pre-serialize + * array index has changed size while in its pre-serialize * callback. * * cache_flush_size_changes: Array of int64 of length @@ -4500,7 +4500,7 @@ typedef struct H5C_tag_info_t { * enforcing the min_clean_fraction in H5C__make_space_in_cache(). * * total_dirty_pf_entries_skipped_in_msic: Number of dirty prefetched entries - * skipped in H5C__make_space_in_cache(). Note that this can + * skipped in H5C__make_space_in_cache(). Note that this can * only occur when a file is opened R/O with a cache image * containing dirty entries. * @@ -4512,7 +4512,7 @@ typedef struct H5C_tag_info_t { * * max_dirty_pf_entries_skipped_in_msic: Maximum number of dirty prefetched * entries skipped in any one call to H5C__make_space_in_cache(). - * Note that this can only occur when the file is opened + * Note that this can only occur when the file is opened * R/O with a cache image containing dirty entries. * * max_entries_scanned_in_msic: Maximum number of entries scanned over @@ -4522,24 +4522,24 @@ typedef struct H5C_tag_info_t { * for entries to evict in order to make space in cache. * * - * The following fields track statistics on cache images. + * The following fields track statistics on cache images. * * images_created: Integer field containing the number of cache images - * created since the last time statistics were reset. + * created since the last time statistics were reset. * * At present, this field must always be either 0 or 1. 
- * Further, since cache images are only created at file
+ * Further, since cache images are only created at file
 * close, this field should only be set at that time.
 *
- * images_read: Integer field containing the number of cache images
+ * images_read: Integer field containing the number of cache images
 * read from file. Note that reading an image is different
 * from loading it -- reading the image means just that,
 * while loading the image refers to decoding it and loading
 * it into the metadata cache.
 *
- * In the serial case, images_read should always equal
- * images_loaded. However, in the parallel case, the
- * image should only be read by process 0. All other
+ * In the serial case, images_read should always equal
+ * images_loaded. However, in the parallel case, the
+ * image should only be read by process 0. All other
 * processes should receive the cache image via a broadcast
 * from process 0.
 *
@@ -4547,25 +4547,25 @@ typedef struct H5C_tag_info_t {
 * loaded since the last time statistics were reset.
 *
 * At present, this field must always be either 0 or 1.
- * Further, since cache images are only loaded at the
+ * Further, since cache images are only loaded at the
 * time of the first protect or on file close, this value
 * should only change on those events.
 *
 * last_image_size: Size of the most recently loaded metadata cache image
 * loaded into the cache, or zero if no image has been
- * loaded.
+ * loaded.
 *
- * At present, at most one cache image can be loaded into
+ * At present, at most one cache image can be loaded into
 * the metadata cache for any given file, and this image
 * will be loaded either on the first protect, or on file
 * close if no entry is protected before then.
 *
 *
 * Fields for tracking prefetched entries. Note that flushes and evictions
- * of prefetched entries are tracked in the flushes and evictions arrays
+ * of prefetched entries are tracked in the flushes and evictions arrays
 * discussed above.
 *
- * prefetches: Number of prefetched entries that are loaded to the
+ * prefetches: Number of prefetched entries that are loaded to the
 * cache.
 *
 * dirty_prefetches: Number of dirty prefetched entries that are loaded
@@ -4573,30 +4573,30 @@ typedef struct H5C_tag_info_t {
 *
 * prefetch_hits: Number of prefetched entries that are actually used.
 *
- *
- * As entries are now capable of moving, loading, dirtying, and deleting
- * other entries in their pre_serialize and serialize callbacks, it has
- * been necessary to insert code to restart scans of lists so as to avoid
- * improper behavior if the next entry in the list is the target of one of
+ *
+ * As entries are now capable of moving, loading, dirtying, and deleting
+ * other entries in their pre_serialize and serialize callbacks, it has
+ * been necessary to insert code to restart scans of lists so as to avoid
+ * improper behavior if the next entry in the list is the target of one of
 * these operations.
 *
- * The following fields are used to count such occurrences. They are used
- * both in tests (to verify that the scan has been restarted), and to
+ * The following fields are used to count such occurrences. They are used
+ * both in tests (to verify that the scan has been restarted), and to
 * obtain estimates of how frequently these restarts occur.
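For illustration, the restart pattern that these counters support looks roughly like the sketch below. This is a minimal sketch only: the function name is hypothetical, the real scan lives in the cache image serialization code, and that code also watches entry_fd_height_change_counter.

static herr_t
serialize_index_with_restarts(H5C_t *cache_ptr)
{
    hbool_t restart;

    do {
        /* Snapshot the counters before scanning the index list */
        int64_t loaded    = cache_ptr->entries_loaded_counter;
        int64_t inserted  = cache_ptr->entries_inserted_counter;
        int64_t relocated = cache_ptr->entries_relocated_counter;

        restart = FALSE;

        /* ... scan the index list, serializing entries in order of
         * increasing flush dependency height ...
         */

        /* If a pre_serialize or serialize callback loaded, inserted,
         * or relocated an entry, the scan may have been invalidated --
         * restart it and record the restart for statistics.
         */
        if (loaded != cache_ptr->entries_loaded_counter ||
                inserted != cache_ptr->entries_inserted_counter ||
                relocated != cache_ptr->entries_relocated_counter) {
            cache_ptr->index_scan_restarts++;
            restart = TRUE;
        }
    } while (restart);

    return SUCCEED;
}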
* * slist_scan_restarts: Number of times a scan of the slist (that contains - * calls to H5C__flush_single_entry()) has been restarted to - * avoid potential issues with change of status of the next + * calls to H5C__flush_single_entry()) has been restarted to + * avoid potential issues with change of status of the next * entry in the scan. * * LRU_scan_restarts: Number of times a scan of the LRU list (that contains - * calls to H5C__flush_single_entry()) has been restarted to - * avoid potential issues with change of status of the next + * calls to H5C__flush_single_entry()) has been restarted to + * avoid potential issues with change of status of the next * entry in the scan. * - * index_scan_restarts: Number of times a scan of the index has been + * index_scan_restarts: Number of times a scan of the index has been * restarted to avoid potential issues with load, insertion - * or change in flush dependency height of an entry other + * or change in flush dependency height of an entry other * than the target entry as the result of call(s) to the * pre_serialize or serialize callbacks. * @@ -4850,7 +4850,7 @@ struct H5C_t { int32_t max_dirty_pf_entries_skipped_in_msic; int32_t max_entries_scanned_in_msic; int64_t entries_scanned_to_make_space; - + /* Fields for tracking skip list scan restarts */ int64_t slist_scan_restarts; int64_t LRU_scan_restarts; diff --git a/src/H5Cprefetched.c b/src/H5Cprefetched.c index 0befdf9..954dd60 100644 --- a/src/H5Cprefetched.c +++ b/src/H5Cprefetched.c @@ -66,7 +66,7 @@ static herr_t H5C__prefetched_entry_pre_serialize(H5F_t *f, void *thing, unsigned *flags_ptr); static herr_t H5C__prefetched_entry_serialize(const H5F_t *f, void *image_ptr, size_t len, void *thing); -static herr_t H5C__prefetched_entry_notify(H5C_notify_action_t action, +static herr_t H5C__prefetched_entry_notify(H5C_notify_action_t action, void *thing); static herr_t H5C__prefetched_entry_free_icr(void *thing); static herr_t H5C__prefetched_entry_fsf_size(const void *thing, @@ -111,12 +111,12 @@ const H5AC_class_t H5AC_PREFETCHED_ENTRY[1] = {{ /*************************************************************************** - * With two exceptions, these functions should never be called, and thus + * With two exceptions, these functions should never be called, and thus * there is little point in documenting them separately as they all simply * throw an error. * * See header comments for the two exceptions (free_icr and notify). 
- * + * * JRM - 8/13/15 * ***************************************************************************/ @@ -157,8 +157,8 @@ H5C__prefetched_entry_verify_chksum(const void H5_ATTR_UNUSED *image_ptr, static void * -H5C__prefetched_entry_deserialize(const void H5_ATTR_UNUSED * image_ptr, - size_t H5_ATTR_UNUSED len, void H5_ATTR_UNUSED * udata, +H5C__prefetched_entry_deserialize(const void H5_ATTR_UNUSED * image_ptr, + size_t H5_ATTR_UNUSED len, void H5_ATTR_UNUSED * udata, hbool_t H5_ATTR_UNUSED * dirty_ptr) { FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */ @@ -182,9 +182,9 @@ H5C__prefetched_entry_image_len(const void H5_ATTR_UNUSED *thing, static herr_t -H5C__prefetched_entry_pre_serialize(H5F_t H5_ATTR_UNUSED *f, void H5_ATTR_UNUSED *thing, +H5C__prefetched_entry_pre_serialize(H5F_t H5_ATTR_UNUSED *f, void H5_ATTR_UNUSED *thing, haddr_t H5_ATTR_UNUSED addr, size_t H5_ATTR_UNUSED len, - haddr_t H5_ATTR_UNUSED *new_addr_ptr, size_t H5_ATTR_UNUSED *new_len_ptr, + haddr_t H5_ATTR_UNUSED *new_addr_ptr, size_t H5_ATTR_UNUSED *new_len_ptr, unsigned H5_ATTR_UNUSED *flags_ptr) { FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */ @@ -196,7 +196,7 @@ H5C__prefetched_entry_pre_serialize(H5F_t H5_ATTR_UNUSED *f, void H5_ATTR_UNUSED static herr_t -H5C__prefetched_entry_serialize(const H5F_t H5_ATTR_UNUSED *f, +H5C__prefetched_entry_serialize(const H5F_t H5_ATTR_UNUSED *f, void H5_ATTR_UNUSED *image_ptr, size_t H5_ATTR_UNUSED len, void H5_ATTR_UNUSED *thing) { @@ -211,7 +211,7 @@ H5C__prefetched_entry_serialize(const H5F_t H5_ATTR_UNUSED *f, /*------------------------------------------------------------------------- * Function: H5C__prefetched_entry_notify * - * Purpose: On H5AC_NOTIFY_ACTION_BEFORE_EVICT, check to see if the + * Purpose: On H5AC_NOTIFY_ACTION_BEFORE_EVICT, check to see if the * target entry is a child in a flush dependency relationship. * If it is, destroy that flush dependency relationship. * @@ -243,18 +243,18 @@ H5C__prefetched_entry_notify(H5C_notify_action_t action, void *_thing) case H5C_NOTIFY_ACTION_AFTER_INSERT: case H5C_NOTIFY_ACTION_AFTER_LOAD: case H5C_NOTIFY_ACTION_AFTER_FLUSH: - case H5C_NOTIFY_ACTION_ENTRY_DIRTIED: - case H5C_NOTIFY_ACTION_ENTRY_CLEANED: - case H5C_NOTIFY_ACTION_CHILD_DIRTIED: - case H5C_NOTIFY_ACTION_CHILD_CLEANED: - case H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED: - case H5C_NOTIFY_ACTION_CHILD_SERIALIZED: + case H5C_NOTIFY_ACTION_ENTRY_DIRTIED: + case H5C_NOTIFY_ACTION_ENTRY_CLEANED: + case H5C_NOTIFY_ACTION_CHILD_DIRTIED: + case H5C_NOTIFY_ACTION_CHILD_CLEANED: + case H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED: + case H5C_NOTIFY_ACTION_CHILD_SERIALIZED: /* do nothing */ break; case H5C_NOTIFY_ACTION_BEFORE_EVICT: for(u = 0; u < entry_ptr->flush_dep_nparents; u++) { - H5C_cache_entry_t * parent_ptr; + H5C_cache_entry_t * parent_ptr; /* Sanity checks */ HDassert(entry_ptr->flush_dep_parent); @@ -268,9 +268,9 @@ H5C__prefetched_entry_notify(H5C_notify_action_t action, void *_thing) HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "unable to destroy prefetched entry flush dependency") if(parent_ptr->prefetched) { - /* In prefetched entries, the fd_child_count field is - * used in sanity checks elsewhere. Thus update this - * field to reflect the destruction of the flush + /* In prefetched entries, the fd_child_count field is + * used in sanity checks elsewhere. Thus update this + * field to reflect the destruction of the flush * dependency relationship. 
 */
 HDassert(parent_ptr->fd_child_count > 0);
@@ -335,8 +335,8 @@ done:
 } /* end H5C__prefetched_entry_free_icr() */
 
-static herr_t
-H5C__prefetched_entry_fsf_size(const void H5_ATTR_UNUSED *thing,
+static herr_t
+H5C__prefetched_entry_fsf_size(const void H5_ATTR_UNUSED *thing,
 hsize_t H5_ATTR_UNUSED *fsf_size_ptr)
 {
 FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index bd200e0..0ba0234 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -150,7 +150,7 @@
 *
 * These flags apply to H5C_protect()
 * H5C__READ_ONLY_FLAG
- * H5C__FLUSH_LAST_FLAG ; super block only
+ * H5C__FLUSH_LAST_FLAG ; super block only
 * H5C__FLUSH_COLLECTIVELY_FLAG ; super block only
 *
 * These flags apply to H5C_unprotect():
@@ -213,8 +213,8 @@
 #define H5C_DO_TAGGING_SANITY_CHECKS 1
 #define H5C_DO_EXTREME_SANITY_CHECKS 0
 #else /* NDEBUG */
-/* With rare exceptions, the following defines should be set
- * to 0 if NDEBUG is defined
+/* With rare exceptions, the following defines should be set
+ * to 0 if NDEBUG is defined
 */
 #define H5C_DO_SANITY_CHECKS 0
 #define H5C_DO_SLIST_SANITY_CHECKS 0
@@ -292,50 +292,50 @@ typedef struct H5C_t H5C_t;
 * H5C__CLASS_NO_FLAGS_SET: No special processing.
 *
 * H5C__CLASS_SPECULATIVE_LOAD_FLAG: This flag is used only in
- * H5C_load_entry(). When it is set, entries are
- * permitted to change their sizes on the first attempt
- * to load.
+ * H5C_load_entry(). When it is set, entries are
+ * permitted to change their sizes on the first attempt
+ * to load.
 *
 * If the new size is larger than the old, the read buffer
- * is reallocated to the new size, loaded from file, and the
- * deserialize routine is called a second time on the
- * new buffer. The entry returned by the first call to
+ * is reallocated to the new size, loaded from file, and the
+ * deserialize routine is called a second time on the
+ * new buffer. The entry returned by the first call to
 * the deserialize routine is discarded (via the free_icr
 * call) after the new size is retrieved (via the image_len
- * call). Note that the new size is used as the size of the
+ * call). Note that the new size is used as the size of the
 * entry in the cache.
 *
- * If the new size is smaller than the old, no new loads
+ * If the new size is smaller than the old, no new loads
 * or deserializes are performed, but the new size becomes
 * the size of the entry in the cache.
 *
- * When this flag is set, an attempt to read past the
- * end of file could occur. In this case, if the size
- * returned by the get_load_size callback would result in a
- * read past the end of file, the size is truncated to
+ * When this flag is set, an attempt to read past the
+ * end of file could occur. In this case, if the size
+ * returned by the get_load_size callback would result in a
+ * read past the end of file, the size is truncated to
 * avoid this, and processing proceeds as normal.
 *
 * The following flags may only appear in test code.
 *
 * H5C__CLASS_SKIP_READS: This flag is intended only for use in test
 * code. When it is set, reads on load will be skipped,
- * and an uninitialized buffer will be passed to the
+ * and an uninitialized buffer will be passed to the
 * deserialize function.
 *
 * H5C__CLASS_SKIP_WRITES: This flag is intended only for use in test
- * code. When it is set, writes of buffers prepared by the
+ * code. When it is set, writes of buffers prepared by the
 * serialize callback will be skipped.
 *
 * GET_INITIAL_LOAD_SIZE: Pointer to the 'get initial load size' function.
 *
- * This function determines the size based on the information in the
+ * This function determines the size based on the information in the
 * parameter "udata" or an initial speculative guess. The size is
 * returned in the parameter "image_len_ptr".
 *
 * For an entry with H5C__CLASS_NO_FLAGS_SET:
 * This function returns in "image_len_ptr" the on disk size of the
 * entry.
- *
+ *
 * For an entry with H5C__CLASS_SPECULATIVE_LOAD_FLAG:
 * This function returns in "image_len_ptr" an initial guess of the
 * entry's on disk size. This many bytes will be loaded from
@@ -359,7 +359,7 @@ typedef struct H5C_t H5C_t;
 * This value is used by the cache to determine the size of
 * the disk image for the metadata, in order to read the disk
 * image from the file.
- *
+ *
 * Processing in the get_load_size function should proceed as follows:
 *
 * If successful, the function will place the length in the *image_len_ptr
@@ -420,17 +420,17 @@ typedef struct H5C_t H5C_t;
 * the same as the checksum stored in the metadata.
 *
 * It computes the checksum based on the metadata stored in the
- * parameter "image_ptr" and the actual length of the metadata in the
+ * parameter "image_ptr" and the actual length of the metadata in the
 * parameter "len" which is obtained from the "get_load_size" callback.
 *
 * The typedef for the verify_chksum callback is as follows:
 *
- * typedef htri_t (*H5C_verify_chksum_func_t)(const void *image_ptr,
- * size_t len,
+ * typedef htri_t (*H5C_verify_chksum_func_t)(const void *image_ptr,
+ * size_t len,
 * void *udata_ptr);
 *
 * The parameters of the verify_chksum callback are as follows:
- *
+ *
 * image_ptr: Pointer to a buffer containing the metadata read in.
 *
 * len: The actual length of the metadata.
@@ -539,23 +539,23 @@ typedef struct H5C_t H5C_t;
 * responsible for serializing the data structure, not moving it on disk
 * or resizing it.
 *
- * In addition, the client may use the pre-serialize callback to
- * ensure that the entry is ready to be flushed -- in particular,
- * if the entry contains references to other entries that are in
+ * In addition, the client may use the pre-serialize callback to
+ * ensure that the entry is ready to be flushed -- in particular,
+ * if the entry contains references to other entries that are in
 * temporary file space, the pre-serialize callback must move those
- * entries into real file space so that the serialized entry will
+ * entries into real file space so that the serialized entry will
 * contain no invalid data.
 *
 * One would think that the base address and length of
- * the entry's image on disk would be well known.
+ * the entry's image on disk would be well known.
 * However, that need not be the case as free space section info
- * entries will change size (and possibly location) depending on the
- * number of blocks of free space being managed, and fractal heap
- * direct blocks can change compressed size (and possibly location)
+ * entries will change size (and possibly location) depending on the
+ * number of blocks of free space being managed, and fractal heap
+ * direct blocks can change compressed size (and possibly location)
 * on serialization if compression is enabled. Similarly, it may
 * be necessary to move entries from temporary to real file space.
 *
- * The pre-serialize callback must report any such changes to the
+ * The pre-serialize callback must report any such changes to the
 * cache, which must then update its internal structures as needed.
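For illustration, a client pre-serialize callback that reports such changes might look like the following sketch, which uses the signature and the H5C__SERIALIZE_RESIZED_FLAG / H5C__SERIALIZE_MOVED_FLAG flags documented below; my_client_entry_t, my_compute_serialized_size(), my_in_tmp_space(), and my_alloc_real_space() are hypothetical client helpers, not library API.

static herr_t
my_client_pre_serialize(H5F_t *f, void *thing, haddr_t addr, size_t len,
    haddr_t *new_addr_ptr, size_t *new_len_ptr, unsigned *flags_ptr)
{
    my_client_entry_t *entry = (my_client_entry_t *)thing;
    size_t new_size;

    *flags_ptr = 0;    /* no size or location change by default */

    /* If the on disk image must grow or shrink (e.g. the number of
     * free space sections recorded has changed), report the new
     * length to the cache.
     */
    new_size = my_compute_serialized_size(entry);
    if (new_size != len) {
        *new_len_ptr = new_size;
        *flags_ptr |= H5C__SERIALIZE_RESIZED_FLAG;
    }

    /* If the entry lives in temporary file space, move it into real
     * file space and report the new base address.
     */
    if (my_in_tmp_space(addr)) {
        if (my_alloc_real_space(f, new_size, new_addr_ptr) < 0)
            return FAIL;    /* a real callback would also push an error */
        *flags_ptr |= H5C__SERIALIZE_MOVED_FLAG;
    }

    return SUCCEED;
}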
* * The typedef for the pre-serialize callback is as follows: @@ -575,7 +575,7 @@ typedef struct H5C_t H5C_t; * target entry. * * thing: Pointer to void containing the address of the in core - * representation of the target metadata cache entry. + * representation of the target metadata cache entry. * This is the same pointer returned by a protect of the * addr and len given above. * @@ -587,8 +587,8 @@ typedef struct H5C_t H5C_t; * production mode. * * len: Length in bytes of the in file image of the entry to be - * serialized. Also the size the image passed to the - * serialize callback (discussed below) unless that + * serialized. Also the size the image passed to the + * serialize callback (discussed below) unless that * value is altered by this function. * * This parameter is supplied mainly for sanity checking. @@ -599,10 +599,10 @@ typedef struct H5C_t H5C_t; * new_addr_ptr: Pointer to haddr_t. If the entry is moved by * the serialize function, the new on disk base address must * be stored in *new_addr_ptr, and the appropriate flag set - * in *flags_ptr. + * in *flags_ptr. * - * If the entry is not moved by the serialize function, - * *new_addr_ptr is undefined on pre-serialize callback + * If the entry is not moved by the serialize function, + * *new_addr_ptr is undefined on pre-serialize callback * return. * * new_len_ptr: Pointer to size_t. If the entry is resized by the @@ -610,8 +610,8 @@ typedef struct H5C_t H5C_t; * must be stored in *new_len_ptr, and the appropriate flag set * in *flags_ptr. * - * If the entry is not resized by the pre-serialize function, - * *new_len_ptr is undefined on pre-serialize callback + * If the entry is not resized by the pre-serialize function, + * *new_len_ptr is undefined on pre-serialize callback * return. * * flags_ptr: Pointer to an unsigned integer used to return flags @@ -625,27 +625,27 @@ typedef struct H5C_t H5C_t; * must be stored in *new_len_ptr. * * If the H5C__SERIALIZE_MOVED_FLAG flag is set, the - * new image base address must be stored in *new_addr_ptr. + * new image base address must be stored in *new_addr_ptr. * * Processing in the pre-serialize function should proceed as follows: * * The pre-serialize function must examine the in core representation * indicated by the thing parameter, if the pre-serialize function does * not need to change the size or location of the on-disk image, it must - * set *flags_ptr to zero. + * set *flags_ptr to zero. * * If the size of the on-disk image must be changed, the pre-serialize * function must load the length of the new image into *new_len_ptr, and - * set the H5C__SERIALIZE_RESIZED_FLAG in *flags_ptr. + * set the H5C__SERIALIZE_RESIZED_FLAG in *flags_ptr. * * If the base address of the on disk image must be changed, the * pre-serialize function must set *new_addr_ptr to the new base address, * and set the H5C__SERIALIZE_MOVED_FLAG in *flags_ptr. * - * In addition, the pre-serialize callback may perform any other + * In addition, the pre-serialize callback may perform any other * processing required before the entry is written to disk * - * If it is successful, the function must return SUCCEED. + * If it is successful, the function must return SUCCEED. * * If it fails for any reason, the function must return FAIL and * push error information on the error stack with the error API @@ -660,11 +660,11 @@ typedef struct H5C_t H5C_t; * * At this point, the base address and length of the entry's image on * disk must be well known and not change during the serialization - * process. + * process. 
* - * While any size and/or location changes must have been handled - * by a pre-serialize call, the client may elect to handle any other - * changes to the entry required to place it in correct form for + * While any size and/or location changes must have been handled + * by a pre-serialize call, the client may elect to handle any other + * changes to the entry required to place it in correct form for * writing to disk in this call. * * The typedef for the serialize callback is as follows: @@ -698,20 +698,20 @@ typedef struct H5C_t H5C_t; * production mode. * * thing: Pointer to void containing the address of the in core - * representation of the target metadata cache entry. + * representation of the target metadata cache entry. * This is the same pointer returned by a protect of the * addr and len given above. * * Processing in the serialize function should proceed as follows: * - * If there are any remaining changes to the entry required before + * If there are any remaining changes to the entry required before * write to disk, they must be dealt with first. * - * The serialize function must then examine the in core - * representation indicated by the thing parameter, and write a - * serialized image of its contents into the provided buffer. + * The serialize function must then examine the in core + * representation indicated by the thing parameter, and write a + * serialized image of its contents into the provided buffer. * - * If it is successful, the function must return SUCCEED. + * If it is successful, the function must return SUCCEED. * * If it fails for any reason, the function must return FAIL and * push error information on the error stack with the error API @@ -737,25 +737,25 @@ typedef struct H5C_t H5C_t; * thing: Pointer to void containing the address of the in core * representation of the target metadata cache entry. This * is the same pointer that would be returned by a protect - * of the addr and len of the entry. + * of the addr and len of the entry. * * Processing in the notify function should proceed as follows: * * The notify function may perform any action it would like, including * metadata cache calls. * - * If the function is successful, it must return SUCCEED. + * If the function is successful, it must return SUCCEED. * * If it fails for any reason, the function must return FAIL and * push error information on the error stack with the error API - * routines. + * routines. * * * FREE_ICR: Pointer to the free ICR callback. * * The free ICR callback is invoked by the metadata cache when it * wishes to evict an entry, and needs the client to free the memory - * allocated for the in core representation. + * allocated for the in core representation. * * The typedef for the free ICR callback is as follows: * @@ -766,18 +766,18 @@ typedef struct H5C_t H5C_t; * thing: Pointer to void containing the address of the in core * representation of the target metadata cache entry. This * is the same pointer that would be returned by a protect - * of the addr and len of the entry. + * of the addr and len of the entry. * * Processing in the free ICR function should proceed as follows: * * The free ICR function must free all memory allocated to the - * in core representation. + * in core representation. * - * If the function is successful, it must return SUCCEED. + * If the function is successful, it must return SUCCEED. * * If it fails for any reason, the function must return FAIL and * push error information on the error stack with the error API - * routines. + * routines. 
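For illustration, a minimal client free ICR callback consistent with the above might look like the sketch below; my_client_entry_t is hypothetical, and a real callback would push errors on the error stack rather than just returning FAIL.

static herr_t
my_client_free_icr(void *thing)
{
    my_client_entry_t *entry = (my_client_entry_t *)thing;

    if (NULL == entry)
        return FAIL;

    /* Free all memory allocated to the in core representation,
     * including any buffers hanging off of it.
     */
    entry->data = H5MM_xfree(entry->data);
    entry = H5MM_xfree(entry);

    return SUCCEED;
}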
* * At least when compiled with debug, it would be useful if the * free ICR call would fail if the in core representation has been @@ -787,30 +787,30 @@ typedef struct H5C_t H5C_t; * * In principle, there is no need for the get file space free size * callback. However, as an optimization, it is sometimes convenient - * to allocate and free file space for a number of cache entries + * to allocate and free file space for a number of cache entries * simultaneously in a single contiguous block of file space. * * File space allocation is done by the client, so the metadata cache * need not be involved. However, since the metadata cache typically - * handles file space release when an entry is destroyed, some + * handles file space release when an entry is destroyed, some * adjustment on the part of the metadata cache is required for this * operation. * - * The get file space free size callback exists to support this + * The get file space free size callback exists to support this * operation. * - * If a group of cache entries that were allocated as a group are to + * If a group of cache entries that were allocated as a group are to * be discarded and their file space released, the type of the first - * (i.e. lowest address) entry in the group must implement the - * get free file space size callback. + * (i.e. lowest address) entry in the group must implement the + * get free file space size callback. * - * To free the file space of all entries in the group in a single - * operation, first expunge all entries other than the first without - * the free file space flag. + * To free the file space of all entries in the group in a single + * operation, first expunge all entries other than the first without + * the free file space flag. * * Then, to complete the operation, unprotect or expunge the first - * entry in the block with the free file space flag set. Since - * the get free file space callback is implemented, the metadata + * entry in the block with the free file space flag set. Since + * the get free file space callback is implemented, the metadata * cache will use this callback to get the size of the block to be * freed, instead of using the size of the entry as is done otherwise. * @@ -830,12 +830,12 @@ typedef struct H5C_t H5C_t; * call of the associated addr and len. * * fs_size_ptr: Pointer to hsize_t in which the callback will return - * the size of the piece of file space to be freed. Note - * that the space to be freed is presumed to have the same + * the size of the piece of file space to be freed. Note + * that the space to be freed is presumed to have the same * base address as the cache entry. * * The function simply returns the size of the block of file space - * to be freed in *fsf_size_ptr. + * to be freed in *fsf_size_ptr. * * If the function is successful, it must return SUCCEED. * @@ -847,16 +847,16 @@ typedef struct H5C_t H5C_t; /* Actions that can be reported to 'notify' client callback */ typedef enum H5C_notify_action_t { - H5C_NOTIFY_ACTION_AFTER_INSERT, /* Entry has been added to the cache + H5C_NOTIFY_ACTION_AFTER_INSERT, /* Entry has been added to the cache * via the insert call */ - H5C_NOTIFY_ACTION_AFTER_LOAD, /* Entry has been loaded into the + H5C_NOTIFY_ACTION_AFTER_LOAD, /* Entry has been loaded into the * from file via the protect call */ H5C_NOTIFY_ACTION_AFTER_FLUSH, /* Entry has just been flushed to * file. */ - H5C_NOTIFY_ACTION_BEFORE_EVICT, /* Entry is about to be evicted + H5C_NOTIFY_ACTION_BEFORE_EVICT, /* Entry is about to be evicted * from cache. 
 */
 H5C_NOTIFY_ACTION_ENTRY_DIRTIED, /* Entry has been marked dirty. */
@@ -911,45 +911,45 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t *cache_ptr, haddr_t addr,
 *
 * H5C_ring_t & associated #defines
 *
- * The metadata cache uses the concept of rings to order the flushes of
- * classes of entries. In this arrangement, each entry in the cache is
- * assigned to a ring, and on flush, the members of the outermost ring
+ * The metadata cache uses the concept of rings to order the flushes of
+ * classes of entries. In this arrangement, each entry in the cache is
+ * assigned to a ring, and on flush, the members of the outermost ring
 * are flushed first, followed by the next outermost, and so on with the
- * members of the innermost ring being flushed last.
+ * members of the innermost ring being flushed last.
 *
- * Note that flush dependencies are used to order flushes within rings.
+ * Note that flush dependencies are used to order flushes within rings.
 *
 * Note also that at the conceptual level, rings are arguably superfluous,
- * as a similar effect could be obtained via the flush dependency mechanism.
- * However, this would require all entries in the cache to participate in a
- * flush dependency -- with the implied setup and takedown overhead and
- * added complexity. Further, the flush ordering between rings need only
- * be enforced on flush operations, and thus the use of flush dependencies
- * instead would apply unnecessary constraints on flushes under normal
+ * as a similar effect could be obtained via the flush dependency mechanism.
+ * However, this would require all entries in the cache to participate in a
+ * flush dependency -- with the implied setup and takedown overhead and
+ * added complexity. Further, the flush ordering between rings need only
+ * be enforced on flush operations, and thus the use of flush dependencies
+ * instead would apply unnecessary constraints on flushes under normal
 * operating circumstances.
 *
- * As of this writing, all metadata entries pertaining to data sets and
- * groups must be flushed first, and are thus assigned to the outermost
- * ring.
+ * As of this writing, all metadata entries pertaining to data sets and
+ * groups must be flushed first, and are thus assigned to the outermost
+ * ring.
 *
 * Free space managers managing file space must be flushed next,
 * and are assigned to the second and third outermost rings. Two rings
 * are used here as the raw data free space manager must be flushed before
 * the metadata free space manager.
 *
- * The object header and associated chunks used to implement superblock
- * extension messages must be flushed next, and are thus assigned to
+ * The object header and associated chunks used to implement superblock
+ * extension messages must be flushed next, and are thus assigned to
 * the fourth outermost ring.
 *
- * The superblock proper must be flushed last, and is thus assigned to
+ * The superblock proper must be flushed last, and is thus assigned to
 * the innermost ring.
 *
 * The H5C_ring_t and the associated #defines below are used to define
- * the rings. Each entry must be assigned to the appropriate ring on
+ * the rings. Each entry must be assigned to the appropriate ring on
 * insertion or protect.
 *
- * Note that H5C_ring_t was originally an enumerated type. It was
- * converted to an integer and a set of #defines for convenience in
+ * Note that H5C_ring_t was originally an enumerated type. It was
+ * converted to an integer and a set of #defines for convenience in
 * debugging.
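For illustration, the outermost-to-innermost ordering described above reduces to a loop of roughly the following shape. This is a sketch only: it assumes the ring #defines run from H5C_RING_USER (the outermost ring) up to H5C_RING_NTYPES, and flush_ring() is a hypothetical helper standing in for the cache's per-ring flush routine.

H5C_ring_t ring = H5C_RING_USER;    /* outermost ring */

while (ring < H5C_RING_NTYPES) {
    /* Flush every entry assigned to this ring before moving inward;
     * within a ring, flush dependencies order the flushes.
     */
    if (flush_ring(f, ring) < 0)
        return FAIL;
    ring++;
}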
*/ @@ -1015,8 +1015,8 @@ typedef int H5C_ring_t; * dynamically allocated block of size bytes in which the * on disk image of the metadata cache entry is stored. * - * If the entry is dirty, the pre-serialize and serialize - * callbacks must be used to update this image before it is + * If the entry is dirty, the pre-serialize and serialize + * callbacks must be used to update this image before it is * written to disk * * image_up_to_date: Boolean flag that is set to TRUE when *image_ptr @@ -1038,14 +1038,14 @@ typedef int H5C_ring_t; * dirtied while protected. * * This field is set to FALSE in the protect call, and may - * be set to TRUE by the H5C_mark_entry_dirty() call at any + * be set to TRUE by the H5C_mark_entry_dirty() call at any * time prior to the unprotect call. * - * The H5C_mark_entry_dirty() call exists as a convenience - * function for the fractal heap code which may not know if - * an entry is protected or pinned, but knows that is either - * protected or pinned. The dirtied field was added as in - * the parallel case, it is necessary to know whether a + * The H5C_mark_entry_dirty() call exists as a convenience + * function for the fractal heap code which may not know if + * an entry is protected or pinned, but knows that is either + * protected or pinned. The dirtied field was added as in + * the parallel case, it is necessary to know whether a * protected entry is dirty prior to the protect call. * * is_protected: Boolean flag indicating whether this entry is protected @@ -1090,9 +1090,9 @@ typedef int H5C_ring_t; * policy code (LRU at present). * * 2) A pinned entry can be accessed or modified at any time. - * This places an extra burden on the pre-serialize and - * serialize callbacks, which must ensure that a pinned - * entry is consistent and ready to write to disk before + * This places an extra burden on the pre-serialize and + * serialize callbacks, which must ensure that a pinned + * entry is consistent and ready to write to disk before * generating an image. * * 3) A pinned entry can be marked as dirty (and possibly @@ -1121,9 +1121,9 @@ typedef int H5C_ring_t; * flushed from the cache until all other entries without * the flush_me_last flag set have been flushed. * - * Note: - * - * At this time, the flush_me_last + * Note: + * + * At this time, the flush_me_last * flag will only be applied to one entry, the superblock, * and the code utilizing these flags is protected with HDasserts * to enforce this. This restraint can certainly be relaxed in @@ -1149,11 +1149,11 @@ typedef int H5C_ring_t; * the unprotect, the entry's is_dirty flag is reset by flushing * it with the H5C__FLUSH_CLEAR_ONLY_FLAG. * - * flush_immediately: Boolean flag used only in Phdf5 -- and then only + * flush_immediately: Boolean flag used only in Phdf5 -- and then only * for H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED. * - * When a distributed metadata write is triggered at a - * sync point, this field is used to mark entries that + * When a distributed metadata write is triggered at a + * sync point, this field is used to mark entries that * must be flushed before leaving the sync point. At all * other times, this field should be set to FALSE. * @@ -1167,10 +1167,10 @@ typedef int H5C_ring_t; * * Fields supporting rings for flush ordering: * - * All entries in the metadata cache are assigned to a ring. On cache + * All entries in the metadata cache are assigned to a ring. 
On cache
 * flush, all entries in the outermost ring are flushed first, followed
- * by all members of the next outermost ring, and so on until the
- * innermost ring is flushed. Note that this ordering is ONLY applied
+ * by all members of the next outermost ring, and so on until the
+ * innermost ring is flushed. Note that this ordering is ONLY applied
 * in flush and serialize calls. Rings are ignored during normal operations
 * in which entries are flushed as directed by the replacement policy.
 *
@@ -1211,7 +1211,7 @@ typedef int H5C_ring_t;
 * this field is nonzero, then this entry cannot be flushed.
 *
 * flush_dep_nunser_children: Number of flush dependency children
- * that are either unserialized, or have a non-zero number of
+ * that are either unserialized, or have a non-zero number of
 * unserialized children.
 *
 * Note that since there is no requirement that a clean entry
@@ -1219,7 +1219,7 @@ typedef int H5C_ring_t;
 * to be greater than flush_dep_ndirty_children.
 *
 * This field exists to facilitate correct ordering of entry
- * serializations when it is necessary to serialize all the
+ * serializations when it is necessary to serialize all the
 * entries in the metadata cache. Thus in the cache
 * serialization, no entry can be serialized unless this
 * field contains 0.
@@ -1232,7 +1232,7 @@ typedef int H5C_ring_t;
 *
 * Addendum: JRM -- 10/14/15
 *
- * We have come to scan all entries in the cache frequently enough that
+ * We have come to scan all entries in the cache frequently enough that
 * the cost of doing so by scanning the hash table has become unacceptable.
 * To reduce this cost, the index now also maintains a doubly linked list
 * of all entries in the index. This list is known as the index list.
@@ -1251,12 +1251,12 @@ typedef int H5C_ring_t;
 *
 * il_next: Next pointer used by the index to maintain a doubly linked
 * list of all entries in the index (and thus in the cache).
- * This field contains a pointer to the next entry in the
+ * This field contains a pointer to the next entry in the
 * index list, or NULL if there is no next entry.
 *
 * il_prev: Prev pointer used by the index to maintain a doubly linked
 * list of all entries in the index (and thus in the cache).
- * This field contains a pointer to the previous entry in the
+ * This field contains a pointer to the previous entry in the
 * index list, or NULL if there is no previous entry.
 *
 *
@@ -1294,71 +1294,71 @@ typedef int H5C_ring_t;
 * The use of the replacement policy fields under the Modified LRU policy
 * is discussed below:
 *
- * next: Next pointer in either the LRU, the protected list, or
- * the pinned list depending on the current values of
- * is_protected and is_pinned. If there is no next entry
+ * next: Next pointer in either the LRU, the protected list, or
+ * the pinned list depending on the current values of
+ * is_protected and is_pinned. If there is no next entry
 * on the list, this field should be set to NULL.
 *
 * prev: Prev pointer in either the LRU, the protected list,
- * or the pinned list depending on the current values of
- * is_protected and is_pinned. If there is no previous
+ * or the pinned list depending on the current values of
+ * is_protected and is_pinned. If there is no previous
 * entry on the list, this field should be set to NULL.
 *
 * aux_next: Next pointer on either the clean or dirty LRU lists.
- * This entry should be NULL when either is_protected or
- * is_pinned is true.
+ * This entry should be NULL when either is_protected or
+ * is_pinned is true.
* - * When is_protected and is_pinned are false, and is_dirty is - * true, it should point to the next item on the dirty LRU - * list. + * When is_protected and is_pinned are false, and is_dirty is + * true, it should point to the next item on the dirty LRU + * list. * - * When is_protected and is_pinned are false, and is_dirty is - * false, it should point to the next item on the clean LRU - * list. In either case, when there is no next item, it + * When is_protected and is_pinned are false, and is_dirty is + * false, it should point to the next item on the clean LRU + * list. In either case, when there is no next item, it * should be NULL. * * aux_prev: Previous pointer on either the clean or dirty LRU lists. - * This entry should be NULL when either is_protected or - * is_pinned is true. - * - * When is_protected and is_pinned are false, and is_dirty is - * true, it should point to the previous item on the dirty - * LRU list. - * - * When is_protected and is_pinned are false, and is_dirty - * is false, it should point to the previous item on the - * clean LRU list. - * - * In either case, when there is no previous item, it should + * This entry should be NULL when either is_protected or + * is_pinned is true. + * + * When is_protected and is_pinned are false, and is_dirty is + * true, it should point to the previous item on the dirty + * LRU list. + * + * When is_protected and is_pinned are false, and is_dirty + * is false, it should point to the previous item on the + * clean LRU list. + * + * In either case, when there is no previous item, it should * be NULL. * * Fields supporting the cache image feature: * - * The following fields are used to store data about the entry which must - * be stored in the cache image block, but which will typically be either - * lost or heavily altered in the process of serializing the cache and + * The following fields are used to store data about the entry which must + * be stored in the cache image block, but which will typically be either + * lost or heavily altered in the process of serializing the cache and * preparing its contents to be copied into the cache image block. * * Some fields are also used in loading the contents of the metadata cache - * image back into the cache, and in managing such entries until they are - * either protected by the library (at which point they become regular - * entries) or are evicted. See discussion of the prefetched field for + * image back into the cache, and in managing such entries until they are + * either protected by the library (at which point they become regular + * entries) or are evicted. See discussion of the prefetched field for * further details. * * include_in_image: Boolean flag indicating whether this entry should * be included in the metadata cache image. This field should * always be false prior to the H5C_prep_for_file_close() call. * During that call, it should be set to TRUE for all entries - * that are to be included in the metadata cache image. At - * present, only the superblock, the superblock extension - * object header and its chunks (if any) are omitted from + * that are to be included in the metadata cache image. At + * present, only the superblock, the superblock extension + * object header and its chunks (if any) are omitted from * the image. * * lru_rank: Rank of the entry in the LRU just prior to file close. 
* * Note that the first entry on the LRU has lru_rank 1, - * and that entries not on the LRU at that time will have - * either lru_rank -1 (if pinned) or 0 (if loaded during + * and that entries not on the LRU at that time will have + * either lru_rank -1 (if pinned) or 0 (if loaded during * the process of flushing the cache. * * image_dirty: Boolean flag indicating whether the entry should be marked @@ -1366,125 +1366,125 @@ typedef int H5C_ring_t; * TRUE iff the entry is dirty when H5C_prep_for_file_close() * is called. * - * fd_parent_count: If the entry is a child in one or more flush dependency - * relationships, this field contains the number of flush + * fd_parent_count: If the entry is a child in one or more flush dependency + * relationships, this field contains the number of flush * dependency parents. * * In all other cases, the field is set to zero. * - * Note that while this count is initially taken from the - * flush dependency fields above, if the entry is in the + * Note that while this count is initially taken from the + * flush dependency fields above, if the entry is in the * cache image (i.e. include_in_image is TRUE), any parents * that are not in the image are removed from this count and * from the fd_parent_addrs array below. * - * Finally observe that if the entry is dirty and in the + * Finally observe that if the entry is dirty and in the * cache image, and its parent is dirty and not in the cache * image, then the entry must be removed from the cache image * to avoid violating the flush dependency flush ordering. * - * fd_parent_addrs: If the entry is a child in one or more flush dependency - * relationship when H5C_prep_for_file_close() is called, this - * field must contain a pointer to an array of size - * fd_parent_count containing the on disk addresses of the + * fd_parent_addrs: If the entry is a child in one or more flush dependency + * relationship when H5C_prep_for_file_close() is called, this + * field must contain a pointer to an array of size + * fd_parent_count containing the on disk addresses of the * parent. * * In all other cases, the field is set to NULL. * - * Note that while this list of addresses is initially taken - * from the flush dependency fields above, if the entry is in the + * Note that while this list of addresses is initially taken + * from the flush dependency fields above, if the entry is in the * cache image (i.e. include_in_image is TRUE), any parents - * that are not in the image are removed from this list, and + * that are not in the image are removed from this list, and * and from the fd_parent_count above. * - * Finally observe that if the entry is dirty and in the + * Finally observe that if the entry is dirty and in the * cache image, and its parent is dirty and not in the cache * image, then the entry must be removed from the cache image * to avoid violating the flush dependency flush ordering. * - * fd_child_count: If the entry is a parent in a flush dependency - * relationship, this field contains the number of flush + * fd_child_count: If the entry is a parent in a flush dependency + * relationship, this field contains the number of flush * dependency children. * * In all other cases, the field is set to zero. * - * Note that while this count is initially taken from the - * flush dependency fields above, if the entry is in the + * Note that while this count is initially taken from the + * flush dependency fields above, if the entry is in the * cache image (i.e. 
include_in_image is TRUE), any children
 * that are not in the image are removed from this count.
 *
- * fd_dirty_child_count: If the entry is a parent in a flush dependency
- * relationship, this field contains the number of dirty flush
+ * fd_dirty_child_count: If the entry is a parent in a flush dependency
+ * relationship, this field contains the number of dirty flush
 * dependency children.
 *
 * In all other cases, the field is set to zero.
 *
- * Note that while this count is initially taken from the
- * flush dependency fields above, if the entry is in the
- * cache image (i.e. include_in_image is TRUE), any dirty
- * children that are not in the image are removed from this
+ * Note that while this count is initially taken from the
+ * flush dependency fields above, if the entry is in the
+ * cache image (i.e. include_in_image is TRUE), any dirty
+ * children that are not in the image are removed from this
 * count.
 *
 * image_fd_height: Flush dependency height of the entry in the cache image.
 *
- * The flush dependency height of any entry involved in a
- * flush dependency relationship is defined to be the
+ * The flush dependency height of any entry involved in a
+ * flush dependency relationship is defined to be the
 * longest flush dependency path from that entry to an entry
- * with no flush dependency children.
+ * with no flush dependency children.
 *
- * Since the image_fd_height is used to order entries in the
- * cache image so that fd parents precede fd children, for
+ * Since the image_fd_height is used to order entries in the
+ * cache image so that fd parents precede fd children, for
 * purposes of this field, an entry is at flush dependency
 * level 0 if it either has no children, or if all of its
- * children are not in the cache image.
+ * children are not in the cache image.
 *
- * Note that if a child in a flush dependency relationship is
+ * Note that if a child in a flush dependency relationship is
 * dirty and in the cache image, and its parent is dirty and
- * not in the cache image, then the child must be excluded
+ * not in the cache image, then the child must be excluded
 * from the cache image to maintain flush ordering.
 *
 * prefetched: Boolean flag indicating that the on disk image of the entry
- * has been loaded into the cache prior to any request for the
+ * has been loaded into the cache prior to any request for the
 * entry by the rest of the library.
 *
- * As of this writing (8/10/15), this can only happen through
- * the load of a cache image block, although other scenarios
- * are contemplated for the use of this feature. Note that
- * unlike the usual prefetch situation, this means that a
- * prefetched entry can be dirty, and/or can be a party to
- * flush dependency relationship(s). This complicates matters
+ * As of this writing (8/10/15), this can only happen through
+ * the load of a cache image block, although other scenarios
+ * are contemplated for the use of this feature. Note that
+ * unlike the usual prefetch situation, this means that a
+ * prefetched entry can be dirty, and/or can be a party to
+ * flush dependency relationship(s). This complicates matters
 * somewhat.
 *
- * The essential feature of a prefetched entry is that it
- * consists only of a buffer containing the on disk image of
- * the entry.
Thus it must be deserialized before it can
+ * be passed back to the library on a protect call. This
 * task is handled by H5C_deserialized_prefetched_entry().
- * In essence, this routine calls the deserialize callback
- * provided in the protect call with the on disk image,
+ * In essence, this routine calls the deserialize callback
+ * provided in the protect call with the on disk image,
 * deletes the prefetched entry from the cache, and replaces
 * it with the deserialized entry returned by the deserialize
 * callback.
 *
- * Further, if the prefetched entry is a flush dependency parent,
- * all its flush dependency children (which must also be
- * prefetched entries), must be transferred to the new cache
+ * Further, if the prefetched entry is a flush dependency parent,
+ * all its flush dependency children (which must also be
+ * prefetched entries), must be transferred to the new cache
 * entry returned by the deserialization callback.
 *
- * Finally, if the prefetched entry is a flush dependency child,
- * this flush dependency must be destroyed prior to the
+ * Finally, if the prefetched entry is a flush dependency child,
+ * this flush dependency must be destroyed prior to the
 * deserialize call.
 *
- * In addition to the above special processing on the first
+ * In addition to the above special processing on the first
 * protect call on a prefetched entry (after which it is no longer
- * a prefetched entry), prefetched entries also require special
+ * a prefetched entry), prefetched entries also require special
 * treatment on flush and evict.
 *
- * On flush, a dirty prefetched entry must simply be written
- * to disk and marked clean without any call to any client
+ * On flush, a dirty prefetched entry must simply be written
+ * to disk and marked clean without any call to any client
 * callback.
 *
- * On eviction, if a prefetched entry is a flush dependency
+ * On eviction, if a prefetched entry is a flush dependency
 * child, that flush dependency relationship must be destroyed
 * just prior to the eviction. If the flush dependency code
 * is working properly, it should be impossible for any entry
@@ -1496,35 +1496,35 @@ typedef int H5C_ring_t;
 *
 * The value of this field is undefined if prefetched is FALSE.
 *
- * age: Number of times a prefetched entry has appeared in
- * subsequent cache images. The field exists to allow
- * imposition of a limit on how many times a prefetched
+ * age: Number of times a prefetched entry has appeared in
+ * subsequent cache images. The field exists to allow
+ * imposition of a limit on how many times a prefetched
 * entry can appear in subsequent cache images without being
 * converted to a regular entry.
 *
- * This field must be zero if prefetched is FALSE.
+ * This field must be zero if prefetched is FALSE.
 *
 * prefetched_dirty: Boolean field that must be set to FALSE unless the
 * following conditions hold:
 *
 * 1) The file has been opened R/O.
 *
- * 2) The entry is either a prefetched entry, or was
+ * 2) The entry is either a prefetched entry, or was
 * re-constructed from a prefetched entry.
 *
 * 3) The base prefetched entry was marked dirty.
 *
- * This field exists to solve the following problem with
+ * This field exists to solve the following problem with
 * files containing cache images that are opened R/O.
* * If the cache image contains a dirty entry, that entry * must be marked clean when it is inserted into the cache - * in the read-only case, as otherwise the metadata cache - * will attempt to flush it on file close -- which is poor + * in the read-only case, as otherwise the metadata cache + * will attempt to flush it on file close -- which is poor * form in the read-only case. * - * However, since the entry is marked clean, it is possible - * that the metadata cache will evict it if the size of the + * However, since the entry is marked clean, it is possible + * that the metadata cache will evict it if the size of the * metadata in the file exceeds the size of the metadata cache, * and the application visits much of this data. * @@ -1534,24 +1534,24 @@ typedef int H5C_ring_t; * the entry has ever been written to it assigned location in * the file. * - * With this background, the purpose of this field should be - * obvious -- when set, it allows the eviction candidate - * selection code to skip over the entry, thus avoiding the + * With this background, the purpose of this field should be + * obvious -- when set, it allows the eviction candidate + * selection code to skip over the entry, thus avoiding the * issue. * - * Since the issue only arises in the R/O case, there is - * no possible interaction with SWMR. There are also + * Since the issue only arises in the R/O case, there is + * no possible interaction with SWMR. There are also * potential interactions with Evict On Close -- at present, * we deal with this by disabling EOC in the R/O case. * - * serialization_count: Integer field used to maintain a count of the - * number of times each entry is serialized during cache + * serialization_count: Integer field used to maintain a count of the + * number of times each entry is serialized during cache * serialization. While no entry should be serialized more than - * once in any serialization call, throw an assertion if any - * flush depencency parent is serialized more than once during + * once in any serialization call, throw an assertion if any + * flush depencency parent is serialized more than once during * a single cache serialization. * - * This is a debugging field, and thus is maintained only if + * This is a debugging field, and thus is maintained only if * NDEBUG is undefined. * * Fields supporting tagged entries: @@ -1688,7 +1688,7 @@ typedef struct H5C_cache_entry_t { * structure H5C_image_entry_t * * Instances of the H5C_image_entry_t structure are used to store data on - * metadata cache entries used in the construction of the metadata cache + * metadata cache entries used in the construction of the metadata cache * image block. In essence this structure is a greatly simplified version * of H5C_cache_entry_t. * @@ -1705,17 +1705,17 @@ typedef struct H5C_cache_entry_t { * * size: Length of the cache entry on disk in bytes. * - * ring: Instance of H5C_ring_t indicating the flush ordering ring + * ring: Instance of H5C_ring_t indicating the flush ordering ring * to which this entry is assigned. * - * age: Number of times this prefetech entry has appeared in - * the current sequence of cache images. This field is + * age: Number of times this prefetech entry has appeared in + * the current sequence of cache images. This field is * initialized to 0 if the instance of H5C_image_entry_t - * is constructed from a regular entry. + * is constructed from a regular entry. 
* - * If the instance is constructed from a prefetched entry + * If the instance is constructed from a prefetched entry * currently residing in the metadata cache, the field is - * set to 1 + the age of the prefetched entry, or to + * set to 1 + the age of the prefetched entry, or to * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX if that sum exceeds * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX. * @@ -1724,8 +1724,8 @@ typedef struct H5C_cache_entry_t { * lru_rank: Rank of the entry in the LRU just prior to file close. * * Note that the first entry on the LRU has lru_rank 1, - * and that entries not on the LRU at that time will have - * either lru_rank -1 (if pinned) or 0 (if loaded during + * and that entries not on the LRU at that time will have + * either lru_rank -1 (if pinned) or 0 (if loaded during * the process of flushing the cache. * * is_dirty: Boolean flag indicating whether the contents of the cache @@ -1757,9 +1757,9 @@ typedef struct H5C_cache_entry_t { * In all other cases, the field is set to zero. * * Note that while this count is initially taken from the - * flush dependency fields in the associated instance of - * H5C_cache_entry_t, if the entry is in the cache image - * (i.e. include_in_image is TRUE), any parents that are + * flush dependency fields in the associated instance of + * H5C_cache_entry_t, if the entry is in the cache image + * (i.e. include_in_image is TRUE), any parents that are * not in the image are removed from this count and * from the fd_parent_addrs array below. * @@ -1767,7 +1767,7 @@ typedef struct H5C_cache_entry_t { * cache image, and its parent is dirty and not in the cache * image, then the entry must be removed from the cache image * to avoid violating the flush dependency flush ordering. - * This should have happened before the construction of + * This should have happened before the construction of * the instance of H5C_image_entry_t. * * fd_parent_addrs: If the entry is a child in one or more flush dependency @@ -1780,27 +1780,27 @@ typedef struct H5C_cache_entry_t { * * Note that while this list of addresses is initially taken * from the flush dependency fields in the associated instance of - * H5C_cache_entry_t, if the entry is in the cache image - * (i.e. include_in_image is TRUE), any parents that are not - * in the image are removed from this list, and from the + * H5C_cache_entry_t, if the entry is in the cache image + * (i.e. include_in_image is TRUE), any parents that are not + * in the image are removed from this list, and from the * fd_parent_count above. * * Finally observe that if the entry is dirty and in the * cache image, and its parent is dirty and not in the cache * image, then the entry must be removed from the cache image * to avoid violating the flush dependency flush ordering. - * This should have happened before the construction of + * This should have happened before the construction of * the instance of H5C_image_entry_t. * - * fd_child_count: If the entry is a parent in a flush dependency - * relationship, this field contains the number of flush + * fd_child_count: If the entry is a parent in a flush dependency + * relationship, this field contains the number of flush * dependency children. * * In all other cases, the field is set to zero. * * Note that while this count is initially taken from the * flush dependency fields in the associated instance of - * H5C_cache_entry_t, if the entry is in the cache image + * H5C_cache_entry_t, if the entry is in the cache image * (i.e. 
include_in_image is TRUE), any children * that are not in the image are removed from this count. * @@ -1812,16 +1812,16 @@ typedef struct H5C_cache_entry_t { * * Note that while this count is initially taken from the * flush dependency fields in the associated instance of - * H5C_cache_entry_t, if the entry is in the cache image - * (i.e. include_in_image is TRUE), any dirty children + * H5C_cache_entry_t, if the entry is in the cache image + * (i.e. include_in_image is TRUE), any dirty children * that are not in the image are removed from this count. * * image_ptr: Pointer to void. When not NULL, this field points to a * dynamically allocated block of size bytes in which the * on disk image of the metadata cache entry is stored. * - * If the entry is dirty, the pre-serialize and serialize - * callbacks must be used to update this image before it is + * If the entry is dirty, the pre-serialize and serialize + * callbacks must be used to update this image before it is * written to disk * * @@ -2141,7 +2141,7 @@ typedef struct H5C_auto_size_ctl_t { * fields for generation of a metadata cache image on file close. * * At present control of construction of a cache image is via a FAPL - * property at file open / create. + * property at file open / create. * * The fields of the structure are discussed individually below: * @@ -2150,43 +2150,43 @@ typedef struct H5C_auto_size_ctl_t { * H5C_image_ctl_t passed to the cache must have a known * version number, or an error will be flagged. * - * generate_image: Boolean flag indicating whether a cache image should + * generate_image: Boolean flag indicating whether a cache image should * be created on file close. * - * save_resize_status: Boolean flag indicating whether the cache image + * save_resize_status: Boolean flag indicating whether the cache image * should include the adaptive cache resize configuration and status. * Note that this field is ignored at present. * * entry_ageout: Integer field indicating the maximum number of * times a prefetched entry can appear in subsequent cache images. - * This field exists to allow the user to avoid the buildup of + * This field exists to allow the user to avoid the buildup of * infrequently used entries in long sequences of cache images. * * The value of this field must lie in the range - * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE (-1) to + * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE (-1) to * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX (100). * - * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE means that no limit + * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE means that no limit * is imposed on number of times a prefeteched entry can appear * in subsequent cache images. * - * A value of 0 prevents prefetched entries from being included + * A value of 0 prevents prefetched entries from being included * in cache images. * * Positive integers restrict prefetched entries to the specified * number of appearances. - * + * * Note that the number of subsequent cache images that a prefetched * entry has appeared in is tracked in an 8 bit field. Thus, while - * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX can be increased from its - * current value, any value in excess of 255 will be the functional + * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX can be increased from its + * current value, any value in excess of 255 will be the functional * equivalent of H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE. * * flags: Unsigned integer containing flags controlling which aspects of the - * cache image functinality is actually executed. 
The primary impetus - * behind this field is to allow development of tests for partial - * implementations that will require little if any modification to run - * with the full implementation. In normal operation, all flags should + * cache image functinality is actually executed. The primary impetus + * behind this field is to allow development of tests for partial + * implementations that will require little if any modification to run + * with the full implementation. In normal operation, all flags should * be set. * ****************************************************************************/ @@ -2196,7 +2196,7 @@ typedef struct H5C_auto_size_ctl_t { #define H5C_CI__SUPRESS_ENTRY_WRITES ((unsigned)0x0004) #define H5C_CI__WRITE_CACHE_IMAGE ((unsigned)0x0008) -/* This #define must set all defined H5C_CI flags. It is +/* This #define must set all defined H5C_CI flags. It is * used in the default value for instances of H5C_cache_image_ctl_t. * This value will only be modified in test code. */ @@ -2242,7 +2242,7 @@ H5_DLL herr_t H5C_evict(H5F_t *f); H5_DLL herr_t H5C_expunge_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, unsigned flags); H5_DLL herr_t H5C_flush_cache(H5F_t *f, unsigned flags); -H5_DLL herr_t H5C_flush_tagged_entries(H5F_t *f, haddr_t tag); +H5_DLL herr_t H5C_flush_tagged_entries(H5F_t *f, haddr_t tag); H5_DLL herr_t H5C_force_cache_image_load(H5F_t * f); H5_DLL herr_t H5C_evict_tagged_entries(H5F_t *f, haddr_t tag, hbool_t match_global); H5_DLL herr_t H5C_expunge_tag_type_metadata(H5F_t *f, haddr_t tag, int type_id, unsigned flags); @@ -2270,7 +2270,7 @@ H5_DLL void * H5C_get_aux_ptr(const H5C_t *cache_ptr); H5_DLL herr_t H5C_image_stats(H5C_t * cache_ptr, hbool_t print_header); H5_DLL herr_t H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, unsigned int flags); -H5_DLL herr_t H5C_load_cache_image_on_next_protect(H5F_t *f, haddr_t addr, +H5_DLL herr_t H5C_load_cache_image_on_next_protect(H5F_t *f, haddr_t addr, hsize_t len, hbool_t rw); H5_DLL herr_t H5C_mark_entry_dirty(void *thing); H5_DLL herr_t H5C_mark_entry_clean(void *thing); @@ -2309,7 +2309,7 @@ H5_DLL herr_t H5C_get_entry_ring(const H5F_t *f, haddr_t addr, H5C_ring_t *ring) H5_DLL herr_t H5C_unsettle_entry_ring(void *thing); H5_DLL herr_t H5C_unsettle_ring(H5F_t * f, H5C_ring_t ring); H5_DLL herr_t H5C_remove_entry(void *thing); -H5_DLL herr_t H5C_cache_image_status(H5F_t * f, hbool_t *load_ci_ptr, +H5_DLL herr_t H5C_cache_image_status(H5F_t * f, hbool_t *load_ci_ptr, hbool_t *write_ci_ptr); H5_DLL hbool_t H5C_cache_image_pending(const H5C_t *cache_ptr); H5_DLL herr_t H5C_get_mdc_image_info(H5C_t *cache_ptr, haddr_t *image_addr, hsize_t *image_len); diff --git a/src/H5Cquery.c b/src/H5Cquery.c index a1267c5..9f1ec31 100644 --- a/src/H5Cquery.c +++ b/src/H5Cquery.c @@ -426,7 +426,7 @@ done: * Function: H5C_get_mdc_image_info * * Purpose: To retrieve the address and size of the cache image in the file. - * + * * Return: SUCCEED on success, and FAIL on failure. 
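 *
 *              Editorial illustration (not part of this change; the local
 *              variables below are assumed, and the prototype is the one
 *              declared above in H5Cprivate.h). A typical internal call:
 *
 *                  haddr_t image_addr = HADDR_UNDEF;
 *                  hsize_t image_len = 0;
 *
 *                  if(H5C_get_mdc_image_info(f->shared->cache, &image_addr, &image_len) < 0)
 *                      HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't get cache image info")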
* * Programmer: Vailin Choi; March 2017 @@ -447,7 +447,7 @@ H5C_get_mdc_image_info(H5C_t * cache_ptr, haddr_t *image_addr, hsize_t *image_le *image_addr = cache_ptr->image_addr; if(image_len) *image_len = cache_ptr->image_len; - + done: FUNC_LEAVE_NOAPI(ret_value) } /* H5C_get_mdc_image_info() */ diff --git a/src/H5Ctag.c b/src/H5Ctag.c index 807e68d..e92d0e4 100644 --- a/src/H5Ctag.c +++ b/src/H5Ctag.c @@ -56,16 +56,16 @@ /* Typedef for tagged entry iterator callback context - evict tagged entries */ typedef struct { H5F_t *f; /* File pointer for evicting entry */ - hbool_t evicted_entries_last_pass; /* Flag to indicate that an entry - * was evicted when iterating over - * cache + hbool_t evicted_entries_last_pass; /* Flag to indicate that an entry + * was evicted when iterating over + * cache */ - hbool_t pinned_entries_need_evicted;/* Flag to indicate that a pinned - * entry was attempted to be evicted + hbool_t pinned_entries_need_evicted;/* Flag to indicate that a pinned + * entry was attempted to be evicted */ - hbool_t skipped_pf_dirty_entries; /* Flag indicating that one or more + hbool_t skipped_pf_dirty_entries; /* Flag indicating that one or more * entries marked prefetched_dirty - * were encountered and not + * were encountered and not * evicted. */ } H5C_tag_iter_evict_ctx_t; @@ -113,10 +113,10 @@ H5FL_EXTERN(H5C_tag_info_t); * Function: H5C_ignore_tags * * Purpose: Override all assertion frameworks associated with making - * sure proper tags are applied to cache entries. + * sure proper tags are applied to cache entries. * - * NOTE: This should really only be used in tests that need - * to access internal functions without going through + * NOTE: This should really only be used in tests that need + * to access internal functions without going through * standard API paths. Since tags are set inside dxpl_id's * before coming into the cache, any external functions that * use the internal library functions (i.e., tests) should @@ -204,8 +204,8 @@ H5C_get_num_objs_corked(const H5C_t *cache_ptr) * Function: H5C__tag_entry * * Purpose: Tags an entry with the provided tag (contained in the API context). - * If sanity checking is enabled, this function will perform - * validation that a proper tag is contained within the provided + * If sanity checking is enabled, this function will perform + * validation that a proper tag is contained within the provided * data access property list id before application. * * Return: FAIL if error is detected, SUCCEED otherwise. @@ -234,10 +234,10 @@ H5C__tag_entry(H5C_t *cache, H5C_cache_entry_t *entry) if(cache->ignore_tags) { /* if we're ignoring tags, it's because we're running - tests on internal functions and may not have inserted a tag + tests on internal functions and may not have inserted a tag value into a given API context before creating some metadata. Thus, in this case only, if a tag value has not been set, we can - arbitrarily set it to something for the sake of passing the tests. + arbitrarily set it to something for the sake of passing the tests. If the tag value is set, then we'll just let it get assigned without additional checking for correctness. 
*/ if(!H5F_addr_defined(tag)) @@ -399,7 +399,7 @@ H5C__iter_tagged_entries_real(H5C_t *cache, haddr_t tag, H5C_tag_iter_cb_t cb, /* Make callback for entry */ if((cb)(entry, cb_ctx) != H5_ITER_CONT) HGOTO_ERROR(H5E_CACHE, H5E_BADITER, FAIL, "tagged entry iteration callback failed") - + /* Advance to next entry */ entry = next_entry; } /* end while */ @@ -441,11 +441,11 @@ H5C__iter_tagged_entries(H5C_t *cache, haddr_t tag, hbool_t match_global, HGOTO_ERROR(H5E_CACHE, H5E_BADITER, FAIL, "iteration of tagged entries failed") /* Check for iterating over global metadata */ - if(match_global) { + if(match_global) { /* Iterate over the entries for SOHM entries */ if(H5C__iter_tagged_entries_real(cache, H5AC__SOHM_TAG, cb, cb_ctx) < 0) HGOTO_ERROR(H5E_CACHE, H5E_BADITER, FAIL, "iteration of tagged entries failed") - + /* Iterate over the entries for global heap entries */ if(H5C__iter_tagged_entries_real(cache, H5AC__GLOBALHEAP_TAG, cb, cb_ctx) < 0) HGOTO_ERROR(H5E_CACHE, H5E_BADITER, FAIL, "iteration of tagged entries failed") @@ -553,29 +553,29 @@ H5C_evict_tagged_entries(H5F_t * f, haddr_t tag, hbool_t match_global) /* Keep doing this until we have stopped evicting entries */ } while(TRUE == ctx.evicted_entries_last_pass); - /* In most cases, fail if we have finished evicting entries and pinned - * entries still need evicted + /* In most cases, fail if we have finished evicting entries and pinned + * entries still need to be evicted * - * However, things can get strange if the file was opened R/O and - * the file contains a cache image and the cache image contains dirty - * entries. + * However, things can get strange if the file was opened R/O and + * the file contains a cache image and the cache image contains dirty + * entries. * - * Since the file was opened read only, dirty entries in the cache + * Since the file was opened read only, dirty entries in the cache * image were marked as clean when they were inserted into the metadata * cache. This is necessary, as if they are marked dirty, the metadata - * cache will attempt to write them on file close, which is frowned + * cache will attempt to write them on file close, which is frowned * upon when the file is opened R/O. * - * On the other hand, such entries (marked prefetched_dirty) must not + * On the other hand, such entries (marked prefetched_dirty) must not * be evicted, as should the cache be asked to re-load them, the cache * will attempt to read them from the file, and at best load an outdated * version. - * - * To avoid this, H5C__evict_tagged_entries_cb has been modified to - * skip such entries. However, by doing so, it may prevent pinned + * + * To avoid this, H5C__evict_tagged_entries_cb has been modified to + * skip such entries. However, by doing so, it may prevent pinned * entries from becoming unpinned. * - * Thus we must ignore ctx.pinned_entries_need_evicted if + * Thus we must ignore ctx.pinned_entries_need_evicted if * ctx.skipped_pf_dirty_entries is TRUE.
*/ if((!ctx.skipped_pf_dirty_entries) && (ctx.pinned_entries_need_evicted)) @@ -631,7 +631,7 @@ H5C__mark_tagged_entries_cb(H5C_cache_entry_t *entry, void H5_ATTR_UNUSED *_ctx) * *------------------------------------------------------------------------- */ -static herr_t +static herr_t H5C__mark_tagged_entries(H5C_t *cache, haddr_t tag) { herr_t ret_value = SUCCEED; /* Return value */ @@ -698,16 +698,16 @@ H5C_verify_tag(int id, haddr_t tag) if(tag == H5AC__SUPERBLOCK_TAG) HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "H5AC__SUPERBLOCK_TAG applied to non-superblock entry") } /* end else */ - + /* Free Space Manager */ if(tag == H5AC__FREESPACE_TAG && ((id != H5AC_FSPACE_HDR_ID) && (id != H5AC_FSPACE_SINFO_ID))) HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "H5AC__FREESPACE_TAG applied to non-freespace entry") - + /* SOHM */ if((id == H5AC_SOHM_TABLE_ID) || (id == H5AC_SOHM_LIST_ID)) if(tag != H5AC__SOHM_TAG) HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "sohm entry not tagged with H5AC__SOHM_TAG") - + /* Global Heap */ if(id == H5AC_GHEAP_ID) { if(tag != H5AC__GLOBALHEAP_TAG) @@ -783,7 +783,7 @@ done: *------------------------------------------------------------------------- */ herr_t -H5C_retag_entries(H5C_t *cache, haddr_t src_tag, haddr_t dest_tag) +H5C_retag_entries(H5C_t *cache, haddr_t src_tag, haddr_t dest_tag) { H5C_tag_info_t *tag_info; /* Points to a tag info struct */ herr_t ret_value = SUCCEED; /* Return value */ @@ -860,7 +860,7 @@ done: * *------------------------------------------------------------------------- */ -herr_t +herr_t H5C_expunge_tag_type_metadata(H5F_t *f, haddr_t tag, int type_id, unsigned flags) { H5C_t *cache; /* Pointer to cache structure */ @@ -904,7 +904,7 @@ done: * *------------------------------------------------------------------------- */ -herr_t +herr_t H5C_get_tag(const void *thing, haddr_t *tag /*OUT*/) { const H5C_cache_entry_t *entry = (const H5C_cache_entry_t *)thing; /* Pointer to cache entry */ diff --git a/src/H5D.c b/src/H5D.c index 61a40df..37b3b39 100644 --- a/src/H5D.c +++ b/src/H5D.c @@ -332,7 +332,7 @@ H5Dclose(hid_t dset_id) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset ID") /* Decrement the counter on the dataset. It will be freed if the count - * reaches zero. + * reaches zero. */ if(H5I_dec_app_ref_always_close(dset_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTDEC, FAIL, "can't decrement count on dataset ID") @@ -814,7 +814,7 @@ H5Dflush(hid_t dset_id) FUNC_ENTER_API(FAIL) H5TRACE1("e", "i", dset_id); - + /* Check args */ if(NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(dset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "dset_id parameter is not a valid dataset identifier") @@ -873,7 +873,7 @@ done: /*------------------------------------------------------------------------- * Function: H5Dformat_convert (Internal) * - * Purpose: For chunked: + * Purpose: For chunked: * Convert the chunk indexing type to version 1 B-tree if not * For compact/contiguous: * Downgrade layout version to 3 if greater than 3 @@ -892,7 +892,7 @@ H5Dformat_convert(hid_t dset_id) { H5VL_object_t *vol_obj; /* Dataset for this operation */ herr_t ret_value = SUCCEED; /* Return value */ - + FUNC_ENTER_API(FAIL) H5TRACE1("e", "i", dset_id); diff --git a/src/H5Dbtree2.c b/src/H5Dbtree2.c index 56554f8..ae98654 100644 --- a/src/H5Dbtree2.c +++ b/src/H5Dbtree2.c @@ -11,7 +11,7 @@ * help@hdfgroup.org. 
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* +/* * * Purpose: v2 B-tree indexing for chunked datasets with > 1 unlimited dimensions. * Each dataset chunk in the b-tree is identified by its dimensional offset. @@ -106,7 +106,7 @@ static int H5D__bt2_idx_iterate_cb(const void *_record, void *_udata); /* Callback for H5B2_find() which is called in H5D__bt2_idx_get_addr() */ static herr_t H5D__bt2_found_cb(const void *nrecord, void *op_data); -/* +/* * Callback for H5B2_remove() and H5B2_delete() which is called * in H5D__bt2_idx_remove() and H5D__bt2_idx_delete(). */ @@ -252,7 +252,7 @@ H5D__bt2_crt_context(void *_udata) H5MM_memcpy(my_dim, udata->dim, H5O_LAYOUT_NDIMS * sizeof(uint32_t)); ctx->dim = my_dim; - /* + /* * Compute the size required for encoding the size of a chunk, * allowing for an extra byte, in case the filter makes the chunk larger. */ @@ -292,7 +292,7 @@ H5D__bt2_dst_context(void *_ctx) /* Free array for chunk dimension sizes */ if(ctx->dim) - (void)H5FL_ARR_FREE(uint32_t, ctx->dim); + (void)H5FL_ARR_FREE(uint32_t, ctx->dim); /* Release callback context */ ctx = H5FL_FREE(H5D_bt2_ctx_t, ctx); @@ -569,7 +569,7 @@ H5D__bt2_filt_debug(FILE *stream, int indent, int fwidth, const H5D_chunk_rec_t *record = (const H5D_chunk_rec_t *)_record; /* The native record */ const H5D_bt2_ctx_t *ctx = (const H5D_bt2_ctx_t *)_ctx; /* Callback context */ unsigned u; /* Local index variable */ - + FUNC_ENTER_STATIC_NOERR /* Sanity checks */ @@ -735,9 +735,9 @@ done: /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_create + * Function: H5D__bt2_idx_create * - * Purpose: Create the v2 B-tree for tracking dataset chunks + * Purpose: Create the v2 B-tree for tracking dataset chunks * * Return: SUCCEED/FAIL * @@ -769,7 +769,7 @@ H5D__bt2_idx_create(const H5D_chk_idx_info_t *idx_info) if(idx_info->pline->nused > 0) { unsigned chunk_size_len; /* Size of encoded chunk size */ - /* + /* * Compute the size required for encoding the size of a chunk, * allowing for an extra byte, in case the filter makes the chunk larger. */ @@ -837,7 +837,7 @@ H5D__bt2_idx_is_space_alloc(const H5O_storage_chunk_t *storage) * Function: H5D__bt2_mod_cb * * Purpose: Modify record for dataset chunk when it is found in a v2 B-tree. - * This is the callback for H5B2_modify() which is called in + * This is the callback for H5B2_modify() which is called in * H5D__bt2_idx_insert(). * * Return: Success: non-negative @@ -879,7 +879,7 @@ H5D__bt2_mod_cb(void *_record, void *_op_data, hbool_t *changed) * Function: H5D__bt2_idx_insert * * Purpose: Insert chunk address into the indexing structure. - * A non-filtered chunk: + * A non-filtered chunk: * Should not exist * Allocate the chunk and pass chunk address back up * A filtered chunk: @@ -954,7 +954,7 @@ done: * Function: H5D__bt2_found_cb * * Purpose: Retrieve record for dataset chunk when it is found in a v2 B-tree. - * This is the callback for H5B2_find() which is called in + * This is the callback for H5B2_find() which is called in * H5D__bt2_idx_get_addr() and H5D__bt2_idx_insert(). * * Return: Success: non-negative @@ -1073,7 +1073,7 @@ done: * Purpose: Translate the B-tree specific chunk record into a generic * form and make the callback to the generic chunk callback * routine. - * This is the callback for H5B2_iterate() which is called in + * This is the callback for H5B2_iterate() which is called in * H5D__bt2_idx_iterate(). 
* * Return: Success: Non-negative @@ -1163,7 +1163,7 @@ done: * * Purpose: Free space for 'dataset chunk' object as v2 B-tree * is being deleted or v2 B-tree node is removed. - * This is the callback for H5B2_remove() and H5B2_delete() which + * This is the callback for H5B2_remove() and H5B2_delete(), * which are called in H5D__bt2_idx_remove() and H5D__bt2_idx_delete(). * * Return: Success: non-negative diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index eab08ac..58dfbc5 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -11,31 +11,31 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* Programmer: Quincey Koziol - * Thursday, April 24, 2008 - * - * Purpose: Abstract indexed (chunked) I/O functions. The logical - * multi-dimensional dataspace is regularly partitioned into - * same-sized "chunks", the first of which is aligned with the - * logical origin. The chunks are indexed by different methods, - * that map a chunk index to disk address. Each chunk can be +/* Programmer: Quincey Koziol + * Thursday, April 24, 2008 + * + * Purpose: Abstract indexed (chunked) I/O functions. The logical + * multi-dimensional dataspace is regularly partitioned into + * same-sized "chunks", the first of which is aligned with the + * logical origin. The chunks are indexed by different methods + * that map a chunk index to disk address. Each chunk can be * compressed independently and the chunks may move around in the * file as their storage requirements change. * - * Cache: Disk I/O is performed in units of chunks and H5MF_alloc() - * contains code to optionally align chunks on disk block - * boundaries for performance. - * - * The chunk cache is an extendible hash indexed by a function - * of storage B-tree address and chunk N-dimensional offset - * within the dataset. Collisions are not resolved -- one of - * the two chunks competing for the hash slot must be preempted - * from the cache. All entries in the hash also participate in - * a doubly-linked list and entries are penalized by moving them - * toward the front of the list. When a new chunk is about to - * be added to the cache the heap is pruned by preempting - * entries near the front of the list to make room for the new - * entry which is added to the end of the list. + * Cache: Disk I/O is performed in units of chunks and H5MF_alloc() + * contains code to optionally align chunks on disk block + * boundaries for performance. + * + * The chunk cache is an extendible hash indexed by a function + * of storage B-tree address and chunk N-dimensional offset + * within the dataset. Collisions are not resolved -- one of + * the two chunks competing for the hash slot must be preempted + * from the cache. All entries in the hash also participate in + * a doubly-linked list and entries are penalized by moving them + * toward the front of the list. When a new chunk is about to + * be added to the cache, the heap is pruned by preempting + * entries near the front of the list to make room for the new + * entry, which is added to the end of the list.
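 *
 *              Editorial sketch of the collision policy described above
 *              (illustration only, not part of this change; the hash and
 *              eviction helpers named here are assumed from this file, and
 *              idx/ent/new_ent/scaled are hypothetical locals):
 *
 *                  idx = H5D__chunk_hash_val(dset->shared, scaled);
 *                  ent = rdcc->slot[idx];
 *                  if(ent && HDmemcmp(ent->scaled, scaled, sizeof(ent->scaled)) != 0)
 *                      H5D__chunk_cache_evict(dset, ent, TRUE);
 *                  rdcc->slot[idx] = new_ent;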
*/ /****************/ @@ -48,19 +48,19 @@ /***********/ /* Headers */ /***********/ -#include "H5private.h" /* Generic Functions */ +#include "H5private.h" /* Generic Functions */ #ifdef H5_HAVE_PARALLEL -#include "H5ACprivate.h" /* Metadata cache */ +#include "H5ACprivate.h" /* Metadata cache */ #endif /* H5_HAVE_PARALLEL */ #include "H5CXprivate.h" /* API Contexts */ -#include "H5Dpkg.h" /* Dataset functions */ -#include "H5Eprivate.h" /* Error handling */ -#include "H5Fprivate.h" /* File functions */ -#include "H5FLprivate.h" /* Free Lists */ -#include "H5Iprivate.h" /* IDs */ -#include "H5MMprivate.h" /* Memory management */ +#include "H5Dpkg.h" /* Dataset functions */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5Fprivate.h" /* File functions */ +#include "H5FLprivate.h" /* Free Lists */ +#include "H5Iprivate.h" /* IDs */ +#include "H5MMprivate.h" /* Memory management */ #include "H5MFprivate.h" /* File memory management */ -#include "H5VMprivate.h" /* Vector and array functions */ +#include "H5VMprivate.h" /* Vector and array functions */ /****************/ @@ -83,26 +83,26 @@ /* * Feature: If this constant is defined then every cache preemption and load - * causes a character to be printed on the standard error stream: + * causes a character to be printed on the standard error stream: * * `.': Entry was preempted because it has been completely read or - * completely written but not partially read and not partially - * written. This is often a good reason for preemption because such - * a chunk will be unlikely to be referenced in the near future. + * completely written but not partially read and not partially + * written. This is often a good reason for preemption because such + * a chunk will be unlikely to be referenced in the near future. * * `:': Entry was preempted because it hasn't been used recently. * * `#': Entry was preempted because another chunk collided with it. This - * is usually a relatively bad thing. If there are too many of - * these then the number of entries in the cache can be increased. + * is usually a relatively bad thing. If there are too many of + * these then the number of entries in the cache can be increased. * * c: Entry was preempted because the file is closing. * - * w: A chunk read operation was eliminated because the library is - * about to write new values to the entire chunk. This is a good - * thing, especially on files where the chunk size is the same as - * the disk block size, chunks are aligned on disk block boundaries, - * and the operating system can also eliminate a read operation. + * w: A chunk read operation was eliminated because the library is + * about to write new values to the entire chunk. This is a good + * thing, especially on files where the chunk size is the same as + * the disk block size, chunks are aligned on disk block boundaries, + * and the operating system can also eliminate a read operation. */ /*#define H5D_CHUNK_DEBUG */ @@ -119,19 +119,19 @@ /* Raw data chunks are cached. Each entry in the cache is: */ typedef struct H5D_rdcc_ent_t { - hbool_t locked; /*entry is locked in cache */ - hbool_t dirty; /*needs to be written to disk? */ - hbool_t deleted; /*chunk about to be deleted */ + hbool_t locked; /*entry is locked in cache */ + hbool_t dirty; /*needs to be written to disk? 
*/ + hbool_t deleted; /*chunk about to be deleted */ unsigned edge_chunk_state; /*states related to edge chunks (see above) */ - hsize_t scaled[H5O_LAYOUT_NDIMS]; /*scaled chunk 'name' (coordinates) */ - uint32_t rd_count; /*bytes remaining to be read */ - uint32_t wr_count; /*bytes remaining to be written */ + hsize_t scaled[H5O_LAYOUT_NDIMS]; /*scaled chunk 'name' (coordinates) */ + uint32_t rd_count; /*bytes remaining to be read */ + uint32_t wr_count; /*bytes remaining to be written */ H5F_block_t chunk_block; /*offset/length of chunk in file */ - hsize_t chunk_idx; /*index of chunk in dataset */ - uint8_t *chunk; /*the unfiltered chunk data */ - unsigned idx; /*index in hash table */ - struct H5D_rdcc_ent_t *next;/*next item in doubly-linked list */ - struct H5D_rdcc_ent_t *prev;/*previous item in doubly-linked list */ + hsize_t chunk_idx; /*index of chunk in dataset */ + uint8_t *chunk; /*the unfiltered chunk data */ + unsigned idx; /*index in hash table */ + struct H5D_rdcc_ent_t *next;/*next item in doubly-linked list */ + struct H5D_rdcc_ent_t *prev;/*previous item in doubly-linked list */ struct H5D_rdcc_ent_t *tmp_next;/*next item in temporary doubly-linked list */ struct H5D_rdcc_ent_t *tmp_prev;/*previous item in temporary doubly-linked list */ } H5D_rdcc_ent_t; @@ -142,7 +142,7 @@ typedef struct H5D_chunk_it_ud1_t { H5D_chunk_common_ud_t common; /* Common info for B-tree user data (must be first) */ const H5D_chk_idx_info_t *idx_info; /* Chunked index info */ const H5D_io_info_t *io_info; /* I/O info for dataset operation */ - const hsize_t *space_dim; /* New dataset dimensions */ + const hsize_t *space_dim; /* New dataset dimensions */ const hbool_t *shrunk_dim; /* Dimensions which have been shrunk */ H5S_t *chunk_space; /* Dataspace for a chunk */ uint32_t elmts_per_chunk;/* Elements in chunk */ @@ -192,12 +192,12 @@ typedef struct H5D_chunk_it_ud3_t { /* needed for getting raw data from chunk cache */ hbool_t chunk_in_cache; - uint8_t *chunk; /* the unfiltered chunk data */ + uint8_t *chunk; /* the unfiltered chunk data */ } H5D_chunk_it_ud3_t; /* Callback info for iteration to dump index */ typedef struct H5D_chunk_it_ud4_t { - FILE *stream; /* Output stream */ + FILE *stream; /* Output stream */ hbool_t header_displayed; /* Node's header is displayed? */ unsigned ndims; /* Number of dimensions for chunk/dataset */ uint32_t *chunk_dim; /* Chunk dimensions */ @@ -207,7 +207,7 @@ typedef struct H5D_chunk_it_ud4_t { typedef struct H5D_chunk_it_ud5_t { H5D_chk_idx_info_t *new_idx_info; /* Dest. 
chunk index info object */ unsigned dset_ndims; /* Number of dimensions in dataset */ - hsize_t *dset_dims; /* Dataset dimensions */ + hsize_t *dset_dims; /* Dataset dimensions */ } H5D_chunk_it_ud5_t; /* Callback info for nonexistent readvv operation */ @@ -254,7 +254,7 @@ static herr_t H5D__chunk_init(H5F_t *f, const H5D_t *dset, hid_t dapl_id); static herr_t H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, H5D_chunk_map_t *fm); -static herr_t H5D__chunk_io_init_selections(const H5D_io_info_t *io_info, +static herr_t H5D__chunk_io_init_selections(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info, H5D_chunk_map_t *fm); static herr_t H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, @@ -321,7 +321,7 @@ static herr_t H5D__chunk_unlock(const H5D_io_info_t *io_info, static herr_t H5D__chunk_cache_prune(const H5D_t *dset, size_t size); static herr_t H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata, hbool_t new_unfilt_chunk); #ifdef H5_HAVE_PARALLEL -static herr_t H5D__chunk_collective_fill(const H5D_t *dset, +static herr_t H5D__chunk_collective_fill(const H5D_t *dset, H5D_chunk_coll_info_t *chunk_info, size_t chunk_size, const void *fill_buf); static int H5D__chunk_cmp_addr(const void *addr1, const void *addr2); #endif /* H5_HAVE_PARALLEL */ @@ -393,15 +393,15 @@ H5FL_BLK_DEFINE_STATIC(chunk); /* Declare extern free list to manage the H5S_sel_iter_t struct */ H5FL_EXTERN(H5S_sel_iter_t); - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_direct_write + * Function: H5D__chunk_direct_write * - * Purpose: Internal routine to write a chunk directly into the file. + * Purpose: Internal routine to write a chunk directly into the file. 
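 *
 *              Editorial aside, not part of this change: this is the
 *              internal routine reached by the public H5Dwrite_chunk()
 *              API (assuming HDF5 1.10.3+), so a typical application-level
 *              use -- with hypothetical chunk_nbytes/chunk_buf variables --
 *              looks like:
 *
 *                  hsize_t offset[2] = {0, 0};
 *
 *                  if(H5Dwrite_chunk(dset_id, H5P_DEFAULT, 0, offset, chunk_nbytes, chunk_buf) < 0)
 *                      (handle the error)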
* - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Raymond Lu + * Programmer: Raymond Lu * 30 July 2012 * *------------------------------------------------------------------------- @@ -444,7 +444,7 @@ H5D__chunk_direct_write(const H5D_t *dset, uint32_t filters, hsize_t *offset, HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") /* Sanity check */ - HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) || + HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) || (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0)); /* Set the file block information for the old chunk */ @@ -486,7 +486,7 @@ H5D__chunk_direct_write(const H5D_t *dset, uint32_t filters, hsize_t *offset, /* Evict the (old) entry from the cache if present, but do not flush * it to disk */ if(UINT_MAX != udata.idx_hint) { - const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */ + const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */ if(H5D__chunk_cache_evict(dset, rdcc->slot[udata.idx_hint], FALSE) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "unable to evict chunk") @@ -509,7 +509,7 @@ done: FUNC_LEAVE_NOAPI_TAG(ret_value) } /* end H5D__chunk_direct_write() */ - + /*------------------------------------------------------------------------- * Function: H5D__chunk_direct_read * @@ -606,7 +606,7 @@ done: FUNC_LEAVE_NOAPI_TAG(ret_value) } /* end H5D__chunk_direct_read() */ - + /*------------------------------------------------------------------------- * Function: H5D__get_chunk_storage_size * @@ -704,15 +704,15 @@ done: FUNC_LEAVE_NOAPI_TAG(ret_value) } /* H5D__get_chunk_storage_size */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_set_info_real + * Function: H5D__chunk_set_info_real * - * Purpose: Internal routine to set the information about chunks for a dataset + * Purpose: Internal routine to set the information about chunks for a dataset * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol + * Programmer: Quincey Koziol * Tuesday, June 30, 2009 * *------------------------------------------------------------------------- @@ -742,7 +742,7 @@ H5D__chunk_set_info_real(H5O_layout_chunk_t *layout, unsigned ndims, /* Sanity check */ if(layout->dim[u] == 0) HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "dimension size must be > 0, dim = %u ", u) - + layout->max_chunks[u] = ((max_dims[u] + layout->dim[u]) - 1) / layout->dim[u]; } @@ -761,15 +761,15 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__chunk_set_info_real() */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_set_info + * Function: H5D__chunk_set_info * - * Purpose: Sets the information about chunks for a dataset + * Purpose: Sets the information about chunks for a dataset * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol + * Programmer: Quincey Koziol * Tuesday, June 30, 2009 * *------------------------------------------------------------------------- @@ -786,7 +786,7 @@ H5D__chunk_set_info(const H5D_t *dset) /* Set the base layout information */ if(H5D__chunk_set_info_real(&dset->shared->layout.u.chunk, 
dset->shared->ndims, dset->shared->curr_dims, dset->shared->max_dims) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set layout's chunk info") + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set layout's chunk info") /* Call the index's "resize" callback */ if(dset->shared->layout.storage.u.chunk.ops->resize && (dset->shared->layout.storage.u.chunk.ops->resize)(&dset->shared->layout.u.chunk) < 0) @@ -796,15 +796,15 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__chunk_set_info() */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_set_sizes + * Function: H5D__chunk_set_sizes * * Purpose: Sets chunk and type sizes. * * Return: SUCCEED/FAIL * - * Programmer: Dana Robinson + * Programmer: Dana Robinson * December 2015 * *------------------------------------------------------------------------- @@ -859,15 +859,15 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__chunk_set_sizes */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_construct + * Function: H5D__chunk_construct * - * Purpose: Constructs new chunked layout information for dataset + * Purpose: Constructs new chunked layout information for dataset * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol + * Programmer: Quincey Koziol * Thursday, May 22, 2008 * *------------------------------------------------------------------------- @@ -922,16 +922,16 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__chunk_construct() */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_init + * Function: H5D__chunk_init * - * Purpose: Initialize the raw data chunk cache for a dataset. This is - * called when the dataset is initialized. + * Purpose: Initialize the raw data chunk cache for a dataset. This is + * called when the dataset is initialized. 
* - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Robb Matzke + * Programmer: Robb Matzke * Monday, May 18, 1998 * *------------------------------------------------------------------------- @@ -940,7 +940,7 @@ static herr_t H5D__chunk_init(H5F_t *f, const H5D_t * const dset, hid_t dapl_id) { H5D_chk_idx_info_t idx_info; /* Chunked index info */ - H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /* Convenience pointer to dataset's chunk cache */ + H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /* Convenience pointer to dataset's chunk cache */ H5P_genplist_t *dapl; /* Data access property list object pointer */ H5O_storage_chunk_t *sc = &(dset->shared->layout.storage.u.chunk); herr_t ret_value = SUCCEED; /* Return value */ @@ -1027,15 +1027,15 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__chunk_init() */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_is_space_alloc + * Function: H5D__chunk_is_space_alloc * - * Purpose: Query if space is allocated for layout + * Purpose: Query if space is allocated for layout * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol + * Programmer: Quincey Koziol * Thursday, January 15, 2009 * *------------------------------------------------------------------------- @@ -1058,7 +1058,7 @@ H5D__chunk_is_space_alloc(const H5O_storage_t *storage) FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__chunk_is_space_alloc() */ - + /*------------------------------------------------------------------------- * Function: H5D__chunk_is_data_cached * @@ -1082,15 +1082,15 @@ H5D__chunk_is_data_cached(const H5D_shared_t *shared_dset) FUNC_LEAVE_NOAPI(shared_dset->cache.chunk.nused > 0) } /* end H5D__chunk_is_data_cached() */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_io_init + * Function: H5D__chunk_io_init * - * Purpose: Performs initialization before any sort of I/O on the raw data + * Purpose: Performs initialization before any sort of I/O on the raw data * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol + * Programmer: Quincey Koziol * Thursday, March 20, 2008 * *------------------------------------------------------------------------- @@ -1106,7 +1106,7 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf unsigned f_ndims; /* The number of dimensions of the file's dataspace */ int sm_ndims; /* The number of dimensions of the memory buffer's dataspace (signed) */ unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC @@ -1172,15 +1172,15 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__chunk_io_init() */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_io_init_selections + * Function: H5D__chunk_io_init_selections * - * Purpose: Initialize the chunk mappings + * Purpose: Initialize the chunk mappings * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol + * Programmer: Quincey Koziol * Thursday, March 20, 2008 * *------------------------------------------------------------------------- @@ -1194,7 +1194,7 @@ 
H5D__chunk_io_init_selections(const H5D_io_info_t *io_info, const H5D_type_info_ H5T_t *file_type = NULL; /* Temporary copy of file datatype for iteration */ hbool_t iter_init = FALSE; /* Selection iteration info has been initialized */ char bogus; /* "bogus" buffer to pass to selection iterator */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC @@ -1347,7 +1347,7 @@ H5D__chunk_io_init_selections(const H5D_io_info_t *io_info, const H5D_type_info_ HGOTO_ERROR(H5E_DATATYPE, H5E_BADSIZE, FAIL, "datatype size invalid") if(H5S_select_iter_init(&(fm->mem_iter), fm->mem_space, elmt_size, 0) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator") - iter_init = TRUE; /* Selection iteration info has been initialized */ + iter_init = TRUE; /* Selection iteration info has been initialized */ iter_op.op_type = H5S_SEL_ITER_OP_LIB; iter_op.u.lib_op = H5D__chunk_mem_cb; @@ -1376,17 +1376,17 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__chunk_io_init_selections() */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_mem_alloc + * Function: H5D__chunk_mem_alloc * - * Purpose: Allocate space for a chunk in memory. This routine allocates + * Purpose: Allocate space for a chunk in memory. This routine allocates * memory space for non-filtered chunks from a block free list * and uses malloc()/free() for filtered chunks. * - * Return: Pointer to memory for chunk on success/NULL on failure + * Return: Pointer to memory for chunk on success/NULL on failure * - * Programmer: Quincey Koziol + * Programmer: Quincey Koziol * April 22, 2004 * *------------------------------------------------------------------------- @@ -1394,7 +1394,7 @@ done: static void * H5D__chunk_mem_alloc(size_t size, const H5O_pline_t *pline) { - void *ret_value = NULL; /* Return value */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_STATIC_NOERR @@ -1408,17 +1408,17 @@ H5D__chunk_mem_alloc(size_t size, const H5O_pline_t *pline) FUNC_LEAVE_NOAPI(ret_value) } /* H5D__chunk_mem_alloc() */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_mem_xfree + * Function: H5D__chunk_mem_xfree * - * Purpose: Free space for a chunk in memory. This routine releases + * Purpose: Free space for a chunk in memory. This routine releases * memory space for non-filtered chunks from a block free list * and uses malloc()/free() for filtered chunks. 
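 *
 *              Editorial sketch of the intended alloc/free pairing
 *              (illustration only; 'size' and 'pline' are assumed to be
 *              in scope):
 *
 *                  void *chk = H5D__chunk_mem_alloc(size, pline);
 *
 *                  if(NULL == chk)
 *                      HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
 *
 *                  ... use the chunk buffer ...
 *
 *                  chk = H5D__chunk_mem_xfree(chk, pline);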
* - * Return: NULL (never fails) + * Return: NULL (never fails) * - * Programmer: Quincey Koziol + * Programmer: Quincey Koziol * April 22, 2004 * *------------------------------------------------------------------------- @@ -1440,7 +1440,7 @@ H5D__chunk_mem_xfree(void *chk, const void *_pline) FUNC_LEAVE_NOAPI(NULL) } /* H5D__chunk_mem_xfree() */ - + /*------------------------------------------------------------------------- * Function: H5D__chunk_mem_realloc * @@ -1473,7 +1473,7 @@ H5D__chunk_mem_realloc(void *chk, size_t size, const H5O_pline_t *pline) FUNC_LEAVE_NOAPI(ret_value) } /* H5D__chunk_mem_realloc() */ - + /*-------------------------------------------------------------------------- NAME H5D__free_chunk_info @@ -1516,16 +1516,16 @@ H5D__free_chunk_info(void *item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED * FUNC_LEAVE_NOAPI(0) } /* H5D__free_chunk_info() */ - + /*------------------------------------------------------------------------- - * Function: H5D__create_chunk_map_single + * Function: H5D__create_chunk_map_single * - * Purpose: Create chunk selections when appending a single record + * Purpose: Create chunk selections when appending a single record * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol - * Tuesday, November 20, 2007 + * Programmer: Quincey Koziol + * Tuesday, November 20, 2007 * *------------------------------------------------------------------------- */ @@ -1541,7 +1541,7 @@ H5D__create_chunk_map_single(H5D_chunk_map_t *fm, const H5D_io_info_t hsize_t sel_start[H5O_LAYOUT_NDIMS]; /* Offset of low bound of file selection */ hsize_t sel_end[H5O_LAYOUT_NDIMS]; /* Offset of high bound of file selection */ unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC @@ -1601,16 +1601,16 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__create_chunk_map_single() */ - + /*------------------------------------------------------------------------- - * Function: H5D__create_chunk_file_map_all + * Function: H5D__create_chunk_file_map_all * - * Purpose: Create all chunk selections in file, for an "all" selection. + * Purpose: Create all chunk selections in file, for an "all" selection. 
* - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol - * Monday, January 21, 2019 + * Programmer: Quincey Koziol + * Monday, January 21, 2019 * *------------------------------------------------------------------------- */ @@ -1627,14 +1627,14 @@ H5D__create_chunk_file_map_all(H5D_chunk_map_t *fm, const H5D_io_info_t hsize_t zeros[H5S_MAX_RANK]; /* All zero vector (for start parameter to setting hyperslab on partial chunks) */ hsize_t coords[H5S_MAX_RANK]; /* Current coordinates of chunk */ hsize_t end[H5S_MAX_RANK]; /* Final coordinates of chunk */ - hsize_t scaled[H5S_MAX_RANK]; /* Scaled coordinates for this chunk */ + hsize_t scaled[H5S_MAX_RANK]; /* Scaled coordinates for this chunk */ hsize_t chunk_index; /* "Index" of chunk */ - hsize_t curr_partial_clip[H5S_MAX_RANK]; /* Current partial dimension sizes to clip against */ - hsize_t partial_dim_size[H5S_MAX_RANK]; /* Size of a partial dimension */ + hsize_t curr_partial_clip[H5S_MAX_RANK]; /* Current partial dimension sizes to clip against */ + hsize_t partial_dim_size[H5S_MAX_RANK]; /* Size of a partial dimension */ hbool_t is_partial_dim[H5S_MAX_RANK]; /* Whether a dimension is currently a partial chunk */ unsigned num_partial_dims; /* Current number of partial dimensions */ unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC @@ -1758,7 +1758,7 @@ H5D__create_chunk_file_map_all(H5D_chunk_map_t *fm, const H5D_io_info_t end[curr_dim] = fm->chunk_dim[curr_dim] - 1; /* Check for previous partial chunk in this dimension */ - if(is_partial_dim[curr_dim] && end[curr_dim] < file_dims[curr_dim]) { + if(is_partial_dim[curr_dim] && end[curr_dim] < file_dims[curr_dim]) { /* Sanity check */ HDassert(num_partial_dims > 0); @@ -1805,16 +1805,16 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__create_chunk_file_map_all() */ - + /*------------------------------------------------------------------------- - * Function: H5D__create_chunk_file_map_hyper + * Function: H5D__create_chunk_file_map_hyper * - * Purpose: Create all chunk selections in file, for a hyperslab selection. + * Purpose: Create all chunk selections in file, for a hyperslab selection. 
* - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol - * Thursday, May 29, 2003 + * Programmer: Quincey Koziol + * Thursday, May 29, 2003 * *------------------------------------------------------------------------- */ @@ -1834,10 +1834,10 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t hsize_t end[H5O_LAYOUT_NDIMS]; /* Final coordinates of chunk */ hsize_t chunk_index; /* Index of chunk */ hsize_t start_scaled[H5S_MAX_RANK]; /* Starting scaled coordinates of selection */ - hsize_t scaled[H5S_MAX_RANK]; /* Scaled coordinates for this chunk */ + hsize_t scaled[H5S_MAX_RANK]; /* Scaled coordinates for this chunk */ int curr_dim; /* Current dimension to increment */ unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC @@ -1913,7 +1913,7 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t new_chunk_info->mspace_shared = FALSE; /* Copy the chunk's scaled coordinates */ - H5MM_memcpy(new_chunk_info->scaled, scaled, sizeof(hsize_t) * fm->f_ndims); + H5MM_memcpy(new_chunk_info->scaled, scaled, sizeof(hsize_t) * fm->f_ndims); new_chunk_info->scaled[fm->f_ndims] = 0; /* Insert the new chunk into the skip list */ @@ -1979,18 +1979,18 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__create_chunk_file_map_hyper() */ - + /*------------------------------------------------------------------------- - * Function: H5D__create_chunk_mem_map_hyper + * Function: H5D__create_chunk_mem_map_hyper * - * Purpose: Create all chunk selections in memory by copying the file + * Purpose: Create all chunk selections in memory by copying the file * chunk selections and adjusting their offsets to be correct * for the memory. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol - * Thursday, May 29, 2003 + * Programmer: Quincey Koziol + * Thursday, May 29, 2003 * * Assumptions: That the file and memory selections are the same shape. 
* @@ -2007,7 +2007,7 @@ H5D__create_chunk_mem_map_hyper(const H5D_chunk_map_t *fm) hsize_t mem_sel_end[H5S_MAX_RANK]; /* Offset of high bound of file selection */ hssize_t adjust[H5S_MAX_RANK]; /* Adjustment to make to all file chunks */ unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC @@ -2111,17 +2111,17 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__create_chunk_mem_map_hyper() */ - + /*------------------------------------------------------------------------- - * Function: H5D__create_mem_map_1d + * Function: H5D__create_mem_map_1d * - * Purpose: Create all chunk selections for 1-dimensional regular memory space + * Purpose: Create all chunk selections for 1-dimensional regular memory space * that has only one single block in the selection * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Vailin Choi - * Sept 18, 2019 + * Programmer: Vailin Choi + * Sept 18, 2019 * *------------------------------------------------------------------------- */ @@ -2192,17 +2192,17 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__create_chunk_mem_map_1d() */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_file_cb + * Function: H5D__chunk_file_cb * - * Purpose: Callback routine for file selection iterator. Used when + * Purpose: Callback routine for file selection iterator. Used when * creating selections in file for each point selected. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol - * Wednesday, July 23, 2003 + * Programmer: Quincey Koziol + * Wednesday, July 23, 2003 * *------------------------------------------------------------------------- */ @@ -2214,9 +2214,9 @@ H5D__chunk_file_cb(void H5_ATTR_UNUSED *elem, const H5T_t H5_ATTR_UNUSED *type, H5D_chunk_info_t *chunk_info; /* Chunk information for current chunk */ hsize_t coords_in_chunk[H5O_LAYOUT_NDIMS]; /* Coordinates of element in chunk */ hsize_t chunk_index; /* Chunk index */ - hsize_t scaled[H5S_MAX_RANK]; /* Scaled coordinates for this chunk */ + hsize_t scaled[H5S_MAX_RANK]; /* Scaled coordinates for this chunk */ unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC @@ -2309,17 +2309,17 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__chunk_file_cb() */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_mem_cb + * Function: H5D__chunk_mem_cb * - * Purpose: Callback routine for file selection iterator. Used when + * Purpose: Callback routine for file selection iterator. Used when * creating selections in memory for each chunk. 
 *
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
 *
- * Programmer: Raymond Lu
- * Thursday, April 10, 2003
+ * Programmer: Raymond Lu
+ * Thursday, April 10, 2003
 *
 *-------------------------------------------------------------------------
 */
@@ -2330,7 +2330,7 @@ H5D__chunk_mem_cb(void H5_ATTR_UNUSED *elem, const H5T_t H5_ATTR_UNUSED *type, u
 H5D_chunk_info_t *chunk_info; /* Chunk information for current chunk */
 hsize_t coords_in_mem[H5S_MAX_RANK]; /* Coordinates of element in memory */
 hsize_t chunk_index; /* Chunk index */
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */

 FUNC_ENTER_STATIC

@@ -2385,17 +2385,17 @@ done:
 FUNC_LEAVE_NOAPI(ret_value)
 } /* end H5D__chunk_mem_cb() */

-
+
 /*-------------------------------------------------------------------------
- * Function: H5D__chunk_cacheable
+ * Function: H5D__chunk_cacheable
 *
- * Purpose: A small internal function to if it's possible to load the
+ * Purpose: A small internal function to determine if it's possible to load the
 * chunk into cache.
 *
- * Return: TRUE or FALSE
+ * Return: TRUE or FALSE
 *
- * Programmer: Raymond Lu
- * 17 July 2007
+ * Programmer: Raymond Lu
+ * 17 July 2007
 *
 *-------------------------------------------------------------------------
 */
@@ -2477,16 +2477,16 @@ done:
 FUNC_LEAVE_NOAPI(ret_value)
 } /* end H5D__chunk_cacheable() */

-
+
 /*-------------------------------------------------------------------------
- * Function: H5D__chunk_read
+ * Function: H5D__chunk_read
 *
- * Purpose: Read from a chunked dataset.
+ * Purpose: Read from a chunked dataset.
 *
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
 *
- * Programmer: Raymond Lu
- * Thursday, April 10, 2003
+ * Programmer: Raymond Lu
+ * Thursday, April 10, 2003
 *
 *-------------------------------------------------------------------------
 */
@@ -2504,7 +2504,7 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
 hbool_t cpt_dirty; /* Temporary placeholder for compact storage "dirty" flag */
 uint32_t src_accessed_bytes = 0; /* Total accessed size in a chunk */
 hbool_t skip_missing_chunks = FALSE; /* Whether to skip missing chunks */
- herr_t ret_value = SUCCEED; /*return value */
+ herr_t ret_value = SUCCEED; /*return value */

 FUNC_ENTER_STATIC

@@ -2556,7 +2556,7 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
 chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm);
 while(chunk_node) {
 H5D_chunk_info_t *chunk_info; /* Chunk information */
- H5D_chunk_ud_t udata; /* Chunk index pass-through */
+ H5D_chunk_ud_t udata; /* Chunk index pass-through */

 /* Get the actual chunk information from the skip list node */
 chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node);
@@ -2566,7 +2566,7 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
 HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")

 /* Sanity check */
- HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) ||
+ HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) ||
 (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0));

 /* Check for non-existent chunk & skip it if appropriate */
@@ -2629,16 +2629,16 @@ done:
 FUNC_LEAVE_NOAPI(ret_value)
 } /* H5D__chunk_read() */

-
+
 /*-------------------------------------------------------------------------
- * Function: H5D__chunk_write
+ * Function: H5D__chunk_write * - * Purpose: Writes to a chunked dataset. + * Purpose: Writes to a chunked dataset. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Raymond Lu - * Thursday, April 10, 2003 + * Programmer: Raymond Lu + * Thursday, April 10, 2003 * *------------------------------------------------------------------------- */ @@ -2654,7 +2654,7 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, H5D_storage_t cpt_store; /* Chunk storage information as compact dataset */ hbool_t cpt_dirty; /* Temporary placeholder for compact storage "dirty" flag */ uint32_t dst_accessed_bytes = 0; /* Total accessed size in a chunk */ - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC @@ -2684,10 +2684,10 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm); while(chunk_node) { H5D_chunk_info_t *chunk_info; /* Chunk information */ - H5D_chk_idx_info_t idx_info; /* Chunked index info */ + H5D_chk_idx_info_t idx_info; /* Chunked index info */ H5D_io_info_t *chk_io_info; /* Pointer to I/O info object for this chunk */ void *chunk; /* Pointer to locked chunk buffer */ - H5D_chunk_ud_t udata; /* Index pass-through */ + H5D_chunk_ud_t udata; /* Index pass-through */ htri_t cacheable; /* Whether the chunk is cacheable */ hbool_t need_insert = FALSE; /* Whether the chunk needs to be inserted into the index */ @@ -2699,11 +2699,11 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") /* Sanity check */ - HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) || + HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) || (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0)); - /* Set chunk's [scaled] coordinates */ - io_info->store->chunk.scaled = chunk_info->scaled; + /* Set chunk's [scaled] coordinates */ + io_info->store->chunk.scaled = chunk_info->scaled; /* Determine if we should use the chunk cache */ if((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, TRUE)) < 0) @@ -2720,7 +2720,7 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, /* Determine if we will access all the data in the chunk */ if(dst_accessed_bytes != ctg_store.contig.dset_size || (chunk_info->chunk_points * type_info->src_type_size) != ctg_store.contig.dset_size || - fm->fsel_type == H5S_SEL_POINTS) + fm->fsel_type == H5S_SEL_POINTS) entire_chunk = FALSE; /* Lock the chunk into the cache */ @@ -2746,8 +2746,8 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, udata.chunk_block.length = io_info->dset->shared->layout.u.chunk.size; /* Allocate the chunk */ - if(H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert, chunk_info->scaled) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk on chunk level") + if(H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert, chunk_info->scaled) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk on chunk level") /* Make sure the address of the chunk is returned. 
*/ if(!H5F_addr_defined(udata.chunk_block.offset)) @@ -2772,16 +2772,16 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, (hsize_t)chunk_info->chunk_points, chunk_info->fspace, chunk_info->mspace) < 0) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked write failed") - /* Release the cache lock on the chunk, or insert chunk into index. */ - if(chunk) { - if(H5D__chunk_unlock(io_info, &udata, TRUE, chunk, dst_accessed_bytes) < 0) - HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk") - } /* end if */ - else { + /* Release the cache lock on the chunk, or insert chunk into index. */ + if(chunk) { + if(H5D__chunk_unlock(io_info, &udata, TRUE, chunk, dst_accessed_bytes) < 0) + HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk") + } /* end if */ + else { if(need_insert && io_info->dset->shared->layout.storage.u.chunk.ops->insert) if((io_info->dset->shared->layout.storage.u.chunk.ops->insert)(&idx_info, &udata, NULL) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert chunk addr into index") - } /* end else */ + } /* end else */ /* Advance to next chunk in list */ chunk_node = H5D_CHUNK_GET_NEXT_NODE(fm, chunk_node); @@ -2791,16 +2791,16 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* H5D__chunk_write() */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_flush + * Function: H5D__chunk_flush * - * Purpose: Writes all dirty chunks to disk and optionally preempts them - * from the cache. + * Purpose: Writes all dirty chunks to disk and optionally preempts them + * from the cache. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Robb Matzke + * Programmer: Robb Matzke * Thursday, May 21, 1998 * *------------------------------------------------------------------------- @@ -2809,8 +2809,8 @@ static herr_t H5D__chunk_flush(H5D_t *dset) { H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); - H5D_rdcc_ent_t *ent, *next; - unsigned nerrors = 0; /* Count of any errors encountered when flushing chunks */ + H5D_rdcc_ent_t *ent, *next; + unsigned nerrors = 0; /* Count of any errors encountered when flushing chunks */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC @@ -2820,34 +2820,34 @@ H5D__chunk_flush(H5D_t *dset) /* Loop over all entries in the chunk cache */ for(ent = rdcc->head; ent; ent = next) { - next = ent->next; + next = ent->next; if(H5D__chunk_flush_entry(dset, ent, FALSE) < 0) nerrors++; } /* end for */ if(nerrors) - HGOTO_ERROR(H5E_DATASET, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks") + HGOTO_ERROR(H5E_DATASET, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks") done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__chunk_flush() */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_io_term + * Function: H5D__chunk_io_term * - * Purpose: Destroy I/O operation information. + * Purpose: Destroy I/O operation information. 
* - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol - * Saturday, May 17, 2003 + * Programmer: Quincey Koziol + * Saturday, May 17, 2003 * *------------------------------------------------------------------------- */ static herr_t H5D__chunk_io_term(const H5D_chunk_map_t *fm) { - herr_t ret_value = SUCCEED; /*return value */ + herr_t ret_value = SUCCEED; /*return value */ FUNC_ENTER_STATIC @@ -2882,16 +2882,16 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__chunk_io_term() */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_dest + * Function: H5D__chunk_dest * - * Purpose: Destroy the entire chunk cache by flushing dirty entries, - * preempting all entries, and freeing the cache itself. + * Purpose: Destroy the entire chunk cache by flushing dirty entries, + * preempting all entries, and freeing the cache itself. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Robb Matzke + * Programmer: Robb Matzke * Thursday, May 21, 1998 * *------------------------------------------------------------------------- @@ -2900,9 +2900,9 @@ static herr_t H5D__chunk_dest(H5D_t *dset) { H5D_chk_idx_info_t idx_info; /* Chunked index info */ - H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /* Dataset's chunk cache */ - H5D_rdcc_ent_t *ent = NULL, *next = NULL; /* Pointer to current & next cache entries */ - int nerrors = 0; /* Accumulated count of errors */ + H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /* Dataset's chunk cache */ + H5D_rdcc_ent_t *ent = NULL, *next = NULL; /* Pointer to current & next cache entries */ + int nerrors = 0; /* Accumulated count of errors */ H5O_storage_chunk_t *sc = &(dset->shared->layout.storage.u.chunk); herr_t ret_value = SUCCEED; /* Return value */ @@ -2918,7 +2918,7 @@ H5D__chunk_dest(H5D_t *dset) if(H5D__chunk_cache_evict(dset, ent, TRUE) < 0) nerrors++; } /* end for */ - + /* Continue even if there are failures. 
*/ if(nerrors) HDONE_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks") @@ -2942,15 +2942,15 @@ done: FUNC_LEAVE_NOAPI_TAG(ret_value) } /* end H5D__chunk_dest() */ - + /*------------------------------------------------------------------------- - * Function: H5D_chunk_idx_reset + * Function: H5D_chunk_idx_reset * - * Purpose: Reset index information + * Purpose: Reset index information * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol + * Programmer: Quincey Koziol * Thursday, January 15, 2009 * *------------------------------------------------------------------------- @@ -2969,21 +2969,21 @@ H5D_chunk_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_addr) /* Reset index structures */ if((storage->ops->reset)(storage, reset_addr) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to reset chunk index info") + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to reset chunk index info") done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D_chunk_idx_reset() */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_cinfo_cache_reset + * Function: H5D__chunk_cinfo_cache_reset * - * Purpose: Reset the cached chunk info + * Purpose: Reset the cached chunk info * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol + * Programmer: Quincey Koziol * November 27, 2007 * *------------------------------------------------------------------------- @@ -3002,15 +3002,15 @@ H5D__chunk_cinfo_cache_reset(H5D_chunk_cached_t *last) FUNC_LEAVE_NOAPI(SUCCEED) } /* H5D__chunk_cinfo_cache_reset() */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_cinfo_cache_update + * Function: H5D__chunk_cinfo_cache_update * - * Purpose: Update the cached chunk info + * Purpose: Update the cached chunk info * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol + * Programmer: Quincey Koziol * November 27, 2007 * *------------------------------------------------------------------------- @@ -3039,15 +3039,15 @@ H5D__chunk_cinfo_cache_update(H5D_chunk_cached_t *last, const H5D_chunk_ud_t *ud FUNC_LEAVE_NOAPI(SUCCEED) } /* H5D__chunk_cinfo_cache_update() */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_cinfo_cache_found + * Function: H5D__chunk_cinfo_cache_found * - * Purpose: Look for chunk info in cache + * Purpose: Look for chunk info in cache * - * Return: TRUE/FALSE/FAIL + * Return: TRUE/FALSE/FAIL * - * Programmer: Quincey Koziol + * Programmer: Quincey Koziol * November 27, 2007 * *------------------------------------------------------------------------- @@ -3088,19 +3088,19 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* H5D__chunk_cinfo_cache_found() */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_create + * Function: H5D__chunk_create * - * Purpose: Creates a new chunked storage index and initializes the - * layout information with information about the storage. The - * layout info should be immediately written to the object header. + * Purpose: Creates a new chunked storage index and initializes the + * layout information with information about the storage. 
The
+ * layout info should be immediately written to the object header.
 *
- * Return: Non-negative on success (with the layout information initialized
- * and ready to write to an object header). Negative on failure.
+ * Return: Non-negative on success (with the layout information initialized
+ * and ready to write to an object header). Negative on failure.
 *
- * Programmer: Quincey Koziol
- * Thursday, May 22, 2008
+ * Programmer: Quincey Koziol
+ * Thursday, May 22, 2008
 *
 *-------------------------------------------------------------------------
 */
@@ -3124,7 +3124,7 @@ H5D__chunk_create(const H5D_t *dset /*in,out*/)
 unsigned u; /* Local index variable */

 for(u = 0; u < dset->shared->layout.u.chunk.ndims; u++)
- HDassert(dset->shared->layout.u.chunk.dim[u] > 0);
+ HDassert(dset->shared->layout.u.chunk.dim[u] > 0);
 }
 #endif

@@ -3142,16 +3142,16 @@ done:
 FUNC_LEAVE_NOAPI(ret_value)
 } /* end H5D__chunk_create() */

-
+
 /*-------------------------------------------------------------------------
- * Function: H5D__chunk_hash_val
+ * Function: H5D__chunk_hash_val
 *
- * Purpose: To calculate an index based on the dataset's scaled
- * coordinates and sizes of the faster dimensions.
+ * Purpose: To calculate an index based on the dataset's scaled
+ * coordinates and sizes of the faster dimensions.
 *
- * Return: Hash value index
+ * Return: Hash value index
 *
- * Programmer: Vailin Choi; Nov 2014
+ * Programmer: Vailin Choi; Nov 2014
 *
 *-------------------------------------------------------------------------
 */
@@ -3184,16 +3184,16 @@ H5D__chunk_hash_val(const H5D_shared_t *shared, const hsize_t *scaled)
 FUNC_LEAVE_NOAPI(ret)
 } /* H5D__chunk_hash_val() */

-
+
 /*-------------------------------------------------------------------------
- * Function: H5D__chunk_lookup
+ * Function: H5D__chunk_lookup
 *
- * Purpose: Loops up a chunk in cache and on disk, and retrieves
- * information about that chunk.
+ * Purpose: Looks up a chunk in cache and on disk, and retrieves
+ * information about that chunk.
 *
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
 *
- * Programmer: Albert Cheng
+ * Programmer: Albert Cheng
 * June 27, 1998
 *
 *-------------------------------------------------------------------------
 */
@@ -3206,7 +3206,7 @@ H5D__chunk_lookup(const H5D_t *dset, const hsize_t *scaled,
 H5O_storage_chunk_t *sc = &(dset->shared->layout.storage.u.chunk);
 unsigned idx; /* Index of chunk in cache, if present */
 hbool_t found = FALSE; /* In cache? */
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */

 FUNC_ENTER_PACKAGE

@@ -3326,18 +3326,18 @@ done:
 FUNC_LEAVE_NOAPI(ret_value)
 } /* H5D__chunk_lookup() */

-
+
 /*-------------------------------------------------------------------------
- * Function: H5D__chunk_flush_entry
+ * Function: H5D__chunk_flush_entry
 *
- * Purpose: Writes a chunk to disk. If RESET is non-zero then the
- * entry is cleared -- it's slightly faster to flush a chunk if
- * the RESET flag is turned on because it results in one fewer
- * memory copy.
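
As a picture of the hash described for H5D__chunk_hash_val() above: the scaled coordinates are flattened using the number of chunks along the faster-varying dimensions, then folded into the slot count. A simplified 2-D sketch, not the library's exact code (down_chunks[] and nslots are assumed here):

    /* down_chunks[1] = number of chunks along the fastest-varying dimension */
    hsize_t  linear = scaled[0] * down_chunks[1] + scaled[1];
    unsigned idx    = (unsigned)(linear % nslots); /* cache slot index */
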
* - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Robb Matzke + * Programmer: Robb Matzke * Thursday, May 21, 1998 * *------------------------------------------------------------------------- @@ -3345,10 +3345,10 @@ done: static herr_t H5D__chunk_flush_entry(const H5D_t *dset, H5D_rdcc_ent_t *ent, hbool_t reset) { - void *buf = NULL; /* Temporary buffer */ - hbool_t point_of_no_return = FALSE; + void *buf = NULL; /* Temporary buffer */ + hbool_t point_of_no_return = FALSE; H5O_storage_chunk_t *sc = &(dset->shared->layout.storage.u.chunk); - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC_TAG(dset->oloc.addr) @@ -3361,7 +3361,7 @@ H5D__chunk_flush_entry(const H5D_t *dset, H5D_rdcc_ent_t *ent, hbool_t reset) buf = ent->chunk; if(ent->dirty) { H5D_chk_idx_info_t idx_info; /* Chunked index info */ - H5D_chunk_ud_t udata; /* pass through B-tree */ + H5D_chunk_ud_t udata; /* pass through B-tree */ hbool_t must_alloc = FALSE; /* Whether the chunk must be allocated */ hbool_t need_insert = FALSE; /* Whether the chunk needs to be inserted into the index */ @@ -3379,7 +3379,7 @@ H5D__chunk_flush_entry(const H5D_t *dset, H5D_rdcc_ent_t *ent, hbool_t reset) && !(ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS)) { H5Z_EDC_t err_detect; /* Error detection info */ H5Z_cb_t filter_cb; /* I/O filter callback function */ - size_t alloc = udata.chunk_block.length; /* Bytes allocated for BUF */ + size_t alloc = udata.chunk_block.length; /* Bytes allocated for BUF */ size_t nbytes; /* Chunk size (in bytes) */ /* Retrieve filter settings from API context */ @@ -3517,7 +3517,7 @@ done: FUNC_LEAVE_NOAPI_TAG(ret_value) } /* end H5D__chunk_flush_entry() */ - + /*------------------------------------------------------------------------- * Function: H5D__chunk_cache_evict * @@ -3596,17 +3596,17 @@ H5D__chunk_cache_evict(const H5D_t *dset, H5D_rdcc_ent_t *ent, hbool_t flush) FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__chunk_cache_evict() */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_cache_prune + * Function: H5D__chunk_cache_prune * - * Purpose: Prune the cache by preempting some things until the cache has - * room for something which is SIZE bytes. Only unlocked - * entries are considered for preemption. + * Purpose: Prune the cache by preempting some things until the cache has + * room for something which is SIZE bytes. Only unlocked + * entries are considered for preemption. 
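
Stripped of the two-method weighting that the comments below describe, the pruning loop maintains one invariant: evict unlocked entries until the incoming SIZE fits. A sketch only, with hypothetical helpers pick_unlocked_entry() and evict_entry() standing in for the real preemption logic:

    while(rdcc->nbytes_used + size > rdcc->nbytes_max) {
        H5D_rdcc_ent_t *victim = pick_unlocked_entry(rdcc); /* hypothetical */

        if(NULL == victim)
            break; /* nothing evictable; the caller deals with the shortfall */
        evict_entry(rdcc, victim); /* hypothetical: flush if dirty, remove */
    }
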
* - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Robb Matzke + * Programmer: Robb Matzke * Thursday, May 21, 1998 * *------------------------------------------------------------------------- @@ -3614,13 +3614,13 @@ H5D__chunk_cache_evict(const H5D_t *dset, H5D_rdcc_ent_t *ent, hbool_t flush) static herr_t H5D__chunk_cache_prune(const H5D_t *dset, size_t size) { - const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); - size_t total = rdcc->nbytes_max; - const int nmeth = 2; /*number of methods */ - int w[1]; /*weighting as an interval */ - H5D_rdcc_ent_t *p[2], *cur; /*list pointers */ - H5D_rdcc_ent_t *n[2]; /*list next pointers */ - int nerrors = 0; /* Accumulated error count during preemptions */ + const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); + size_t total = rdcc->nbytes_max; + const int nmeth = 2; /*number of methods */ + int w[1]; /*weighting as an interval */ + H5D_rdcc_ent_t *p[2], *cur; /*list pointers */ + H5D_rdcc_ent_t *n[2]; /*list next pointers */ + int nerrors = 0; /* Accumulated error count during preemptions */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC @@ -3642,91 +3642,91 @@ H5D__chunk_cache_prune(const H5D_t *dset, size_t size) while((p[0] || p[1]) && (rdcc->nbytes_used + size) > total) { int i; /* Local index variable */ - /* Introduce new pointers */ - for(i = 0; i < nmeth - 1; i++) + /* Introduce new pointers */ + for(i = 0; i < nmeth - 1; i++) if(0 == w[i]) p[i + 1] = rdcc->head; - /* Compute next value for each pointer */ - for(i = 0; i < nmeth; i++) + /* Compute next value for each pointer */ + for(i = 0; i < nmeth; i++) n[i] = p[i] ? p[i]->next : NULL; - /* Give each method a chance */ - for(i = 0; i < nmeth && (rdcc->nbytes_used + size) > total; i++) { - if(0 == i && p[0] && !p[0]->locked && + /* Give each method a chance */ + for(i = 0; i < nmeth && (rdcc->nbytes_used + size) > total; i++) { + if(0 == i && p[0] && !p[0]->locked && ((0 == p[0]->rd_count && 0 == p[0]->wr_count) || (0 == p[0]->rd_count && dset->shared->layout.u.chunk.size == p[0]->wr_count) || (dset->shared->layout.u.chunk.size == p[0]->rd_count && 0 == p[0]->wr_count))) { - /* - * Method 0: Preempt entries that have been completely written - * and/or completely read but not entries that are partially - * written or partially read. - */ - cur = p[0]; - } else if(1 == i && p[1] && !p[1]->locked) { - /* - * Method 1: Preempt the entry without regard to - * considerations other than being locked. This is the last - * resort preemption. - */ - cur = p[1]; - } else { - /* Nothing to preempt at this point */ - cur = NULL; - } - - if(cur) { + /* + * Method 0: Preempt entries that have been completely written + * and/or completely read but not entries that are partially + * written or partially read. + */ + cur = p[0]; + } else if(1 == i && p[1] && !p[1]->locked) { + /* + * Method 1: Preempt the entry without regard to + * considerations other than being locked. This is the last + * resort preemption. 
+ */
+ cur = p[1];
+ } else {
+ /* Nothing to preempt at this point */
+ cur = NULL;
+ }
+
+ if(cur) {
 int j; /* Local index variable */

- for(j = 0; j < nmeth; j++) {
- if(p[j] == cur)
+ for(j = 0; j < nmeth; j++) {
+ if(p[j] == cur)
 p[j] = NULL;
- if(n[j] == cur)
+ if(n[j] == cur)
 n[j] = cur->next;
- } /* end for */
- if(H5D__chunk_cache_evict(dset, cur, TRUE) < 0)
+ } /* end for */
+ if(H5D__chunk_cache_evict(dset, cur, TRUE) < 0)
 nerrors++;
- } /* end if */
- } /* end for */
+ } /* end if */
+ } /* end for */

- /* Advance pointers */
- for(i = 0; i < nmeth; i++)
+ /* Advance pointers */
+ for(i = 0; i < nmeth; i++)
 p[i] = n[i];
- for(i = 0; i < nmeth - 1; i++)
+ for(i = 0; i < nmeth - 1; i++)
 w[i] -= 1;
 } /* end while */

 if(nerrors)
- HGOTO_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "unable to preempt one or more raw data cache entry")
+ HGOTO_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "unable to preempt one or more raw data cache entry")

done:
 FUNC_LEAVE_NOAPI(ret_value)
 } /* end H5D__chunk_cache_prune() */

-
+
 /*-------------------------------------------------------------------------
- * Function: H5D__chunk_lock
+ * Function: H5D__chunk_lock
 *
- * Purpose: Return a pointer to a dataset chunk. The pointer points
- * directly into the chunk cache and should not be freed
- * by the caller but will be valid until it is unlocked. The
- * input value IDX_HINT is used to speed up cache lookups and
- * it's output value should be given to H5D__chunk_unlock().
- * IDX_HINT is ignored if it is out of range, and if it points
- * to the wrong entry then we fall back to the normal search
- * method.
+ * Purpose: Return a pointer to a dataset chunk. The pointer points
+ * directly into the chunk cache and should not be freed
+ * by the caller but will be valid until it is unlocked. The
+ * input value IDX_HINT is used to speed up cache lookups and
+ * its output value should be given to H5D__chunk_unlock().
+ * IDX_HINT is ignored if it is out of range, and if it points
+ * to the wrong entry then we fall back to the normal search
+ * method.
 *
- * If RELAX is non-zero and the chunk isn't in the cache then
- * don't try to read it from the file, but just allocate an
- * uninitialized buffer to hold the result. This is intended
- * for output functions that are about to overwrite the entire
- * chunk.
+ * If RELAX is non-zero and the chunk isn't in the cache then
+ * don't try to read it from the file, but just allocate an
+ * uninitialized buffer to hold the result. This is intended
+ * for output functions that are about to overwrite the entire
+ * chunk.
 *
- * Return: Success: Ptr to a file chunk.
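
A caller's-eye view of the protocol in the header above, as a sketch only; the real H5D__chunk_lock()/H5D__chunk_unlock() are static and take io_info/udata structures rather than these hypothetical arguments:

    void *chunk;

    /* relax = TRUE: skip the disk read because the whole chunk will be
     * overwritten; the buffer comes back uninitialized */
    if(NULL == (chunk = chunk_lock(io_info, &udata, relax)))
        /* ...handle the error... */;

    /* ...read or modify the cached chunk in place... */

    chunk_unlock(io_info, &udata, /* dirty */ TRUE, chunk, naccessed);
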
* - * Failure: NULL + * Failure: NULL * - * Programmer: Robb Matzke + * Programmer: Robb Matzke * Thursday, May 21, 1998 * *------------------------------------------------------------------------- @@ -3742,12 +3742,12 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, const H5O_fill_t *fill = &(dset->shared->dcpl_cache.fill); /* Fill value info */ H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */ hbool_t fb_info_init = FALSE; /* Whether the fill value buffer has been initialized */ - H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache*/ - H5D_rdcc_ent_t *ent; /*cache entry */ - size_t chunk_size; /*size of a chunk */ + H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache*/ + H5D_rdcc_ent_t *ent; /*cache entry */ + size_t chunk_size; /*size of a chunk */ hbool_t disable_filters = FALSE; /* Whether to disable filters (when adding to cache) */ - void *chunk = NULL; /*the file chunk */ - void *ret_value = NULL; /* Return value */ + void *chunk = NULL; /*the file chunk */ + void *ret_value = NULL; /* Return value */ FUNC_ENTER_STATIC @@ -3774,7 +3774,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, #ifndef NDEBUG { - unsigned u; /*counters */ + unsigned u; /*counters */ /* Make sure this is the right chunk */ for(u = 0; u < layout->u.chunk.ndims - 1; u++) @@ -3932,8 +3932,8 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, /* Check if the chunk exists on disk */ if(H5F_addr_defined(chunk_addr)) { - size_t my_chunk_alloc = chunk_alloc; /* Allocated buffer size */ - size_t buf_alloc = chunk_alloc; /* [Re-]allocated buffer size */ + size_t my_chunk_alloc = chunk_alloc; /* Allocated buffer size */ + size_t buf_alloc = chunk_alloc; /* [Re-]allocated buffer size */ /* Chunk size on disk isn't [likely] the same size as the final chunk * size in memory, so allocate memory big enough. */ @@ -3973,7 +3973,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, rdcc->stats.nmisses++; } /* end if */ else { - H5D_fill_value_t fill_status; + H5D_fill_value_t fill_status; /* Sanity check */ HDassert(fill->alloc_time != H5D_ALLOC_TIME_EARLY); @@ -4107,25 +4107,25 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__chunk_lock() */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_unlock + * Function: H5D__chunk_unlock * - * Purpose: Unlocks a previously locked chunk. The LAYOUT, COMP, and - * OFFSET arguments should be the same as for H5D__chunk_lock(). - * The DIRTY argument should be set to non-zero if the chunk has - * been modified since it was locked. The IDX_HINT argument is - * the returned index hint from the lock operation and BUF is - * the return value from the lock. + * Purpose: Unlocks a previously locked chunk. The LAYOUT, COMP, and + * OFFSET arguments should be the same as for H5D__chunk_lock(). + * The DIRTY argument should be set to non-zero if the chunk has + * been modified since it was locked. The IDX_HINT argument is + * the returned index hint from the lock operation and BUF is + * the return value from the lock. * - * The NACCESSED argument should be the number of bytes accessed - * for reading or writing (depending on the value of DIRTY). - * It's only purpose is to provide additional information to the - * preemption policy. + * The NACCESSED argument should be the number of bytes accessed + * for reading or writing (depending on the value of DIRTY). 
+ * Its only purpose is to provide additional information to the
+ * preemption policy.
 *
- * Return: Non-negative on success/Negative on failure
 *
- * Programmer: Robb Matzke
+ * Programmer: Robb Matzke
 * Thursday, May 21, 1998
 *
 *-------------------------------------------------------------------------
 */
@@ -4135,7 +4135,7 @@ H5D__chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata,
 hbool_t dirty, void *chunk, uint32_t naccessed)
 {
 const H5O_layout_t *layout = &(io_info->dset->shared->layout); /* Dataset layout */
- const H5D_rdcc_t *rdcc = &(io_info->dset->shared->cache.chunk);
+ const H5D_rdcc_t *rdcc = &(io_info->dset->shared->cache.chunk);
 herr_t ret_value = SUCCEED; /* Return value */

 FUNC_ENTER_STATIC

@@ -4173,9 +4173,9 @@ H5D__chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata,
 fake_ent.edge_chunk_state = H5D_RDCC_DISABLE_FILTERS;
 if(udata->new_unfilt_chunk)
 fake_ent.edge_chunk_state |= H5D_RDCC_NEWLY_DISABLED_FILTERS;
- H5MM_memcpy(fake_ent.scaled, udata->common.scaled, sizeof(hsize_t) * layout->u.chunk.ndims);
+ H5MM_memcpy(fake_ent.scaled, udata->common.scaled, sizeof(hsize_t) * layout->u.chunk.ndims);
 HDassert(layout->u.chunk.size > 0);
- fake_ent.chunk_idx = udata->chunk_idx;
+ fake_ent.chunk_idx = udata->chunk_idx;
 fake_ent.chunk_block.offset = udata->chunk_block.offset;
 fake_ent.chunk_block.length = udata->chunk_block.length;
 fake_ent.chunk = (uint8_t *)chunk;
@@ -4186,16 +4186,16 @@ H5D__chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata,
 else {
 if(chunk)
 chunk = H5D__chunk_mem_xfree(chunk, (is_unfiltered_edge_chunk ? NULL
- : &(io_info->dset->shared->dcpl_cache.pline)));
+ : &(io_info->dset->shared->dcpl_cache.pline)));
 } /* end else */
 } /* end if */
 else {
- H5D_rdcc_ent_t *ent; /* Chunk's entry in the cache */
+ H5D_rdcc_ent_t *ent; /* Chunk's entry in the cache */

 /* Sanity check */
- HDassert(udata->idx_hint < rdcc->nslots);
- HDassert(rdcc->slot[udata->idx_hint]);
- HDassert(rdcc->slot[udata->idx_hint]->chunk == chunk);
+ HDassert(udata->idx_hint < rdcc->nslots);
+ HDassert(rdcc->slot[udata->idx_hint]);
+ HDassert(rdcc->slot[udata->idx_hint]->chunk == chunk);

 /*
 * It's in the cache so unlock it.
@@ -4215,16 +4215,16 @@ done:
 FUNC_LEAVE_NOAPI(ret_value)
 } /* end H5D__chunk_unlock() */

-
+
 /*-------------------------------------------------------------------------
- * Function: H5D__chunk_allocated_cb
+ * Function: H5D__chunk_allocated_cb
 *
- * Purpose: Simply counts the number of chunks for a dataset.
+ * Purpose: Simply counts the number of chunks for a dataset.
 *
- * Return: Success: Non-negative
- * Failure: Negative
+ * Return: Success: Non-negative
+ * Failure: Negative
 *
- * Programmer: Robb Matzke
+ * Programmer: Robb Matzke
 * Wednesday, April 21, 1999
 *
 *-------------------------------------------------------------------------
@@ -4241,17 +4241,17 @@ H5D__chunk_allocated_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
 FUNC_LEAVE_NOAPI(H5_ITER_CONT)
 } /* H5D__chunk_allocated_cb() */

-
+
 /*-------------------------------------------------------------------------
- * Function: H5D__chunk_allocated
+ * Function: H5D__chunk_allocated
 *
- * Purpose: Return the number of bytes allocated in the file for storage
- * of raw data in the chunked dataset
+ * Purpose: Return the number of bytes allocated in the file for storage
+ * of raw data in the chunked dataset
 *
- * Return: Success: Number of bytes stored in all chunks.
- * Failure: 0
+ * Return: Success: Number of bytes stored in all chunks.
+ * Failure: 0
 *
- * Programmer: Quincey Koziol
+ * Programmer: Quincey Koziol
 * Tuesday, May 20, 2008
 *
 *-------------------------------------------------------------------------
 */
@@ -4260,7 +4260,7 @@ herr_t
 H5D__chunk_allocated(const H5D_t *dset, hsize_t *nbytes)
 {
 H5D_chk_idx_info_t idx_info; /* Chunked index info */
- const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /* Raw data chunk cache */
+ const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /* Raw data chunk cache */
 H5D_rdcc_ent_t *ent; /* Cache entry */
 hsize_t chunk_bytes = 0; /* Number of bytes allocated for chunks */
 H5O_storage_chunk_t *sc = &(dset->shared->layout.storage.u.chunk);
@@ -4295,18 +4295,18 @@ done:
 FUNC_LEAVE_NOAPI(ret_value)
 } /* end H5D__chunk_allocated() */

-
+
 /*-------------------------------------------------------------------------
- * Function: H5D__chunk_allocate
+ * Function: H5D__chunk_allocate
 *
- * Purpose: Allocate file space for all chunks that are not allocated yet.
- * Return SUCCEED if all needed allocation succeed, otherwise
- * FAIL.
+ * Purpose: Allocate file space for all chunks that are not allocated yet.
+ * Return SUCCEED if all needed allocations succeed, otherwise
+ * FAIL.
 *
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
 *
- * Programmer: Albert Cheng
- * June 26, 1998
+ * Programmer: Albert Cheng
+ * June 26, 1998
 *
 *-------------------------------------------------------------------------
 */
@@ -4318,8 +4318,8 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_
 const H5D_chunk_ops_t *ops = dset->shared->layout.storage.u.chunk.ops; /* Chunk operations */
 hsize_t min_unalloc[H5O_LAYOUT_NDIMS]; /* First chunk in each dimension that is unallocated (in scaled coordinates) */
 hsize_t max_unalloc[H5O_LAYOUT_NDIMS]; /* Last chunk in each dimension that is unallocated (in scaled coordinates) */
- hsize_t scaled[H5O_LAYOUT_NDIMS]; /* Offset of current chunk (in scaled coordinates) */
- size_t orig_chunk_size; /* Original size of chunk in bytes */
+ hsize_t scaled[H5O_LAYOUT_NDIMS]; /* Offset of current chunk (in scaled coordinates) */
+ size_t orig_chunk_size; /* Original size of chunk in bytes */
 size_t chunk_size; /* Actual size of chunk in bytes, possibly filtered */
 unsigned filter_mask = 0; /* Filter mask for chunks that have them */
 const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
@@ -4335,7 +4335,7 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_
 hbool_t using_mpi = FALSE; /* Flag to indicate that the file is being accessed with an MPI-capable file driver */
 H5D_chunk_coll_info_t chunk_info; /* chunk address information for doing I/O */
 #endif /* H5_HAVE_PARALLEL */
- hbool_t carry; /* Flag to indicate that chunk increment carrys to higher dimension (sorta) */
+ hbool_t carry; /* Flag to indicate that chunk increment carries to higher dimension (sorta) */
 unsigned space_ndims; /* Dataset's space rank */
 const hsize_t *space_dim; /* Dataset's dataspace dimensions */
 const uint32_t *chunk_dim = layout->u.chunk.dim; /* Convenience pointer to chunk dimensions */
@@ -4347,7 +4347,7 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_
 hsize_t edge_chunk_scaled[H5O_LAYOUT_NDIMS]; /* Offset of the unfiltered edge chunks at the edge of each dimension */
 unsigned nunfilt_edge_chunk_dims = 0; /* Number of dimensions on an edge */
const H5O_storage_chunk_t *sc = &(layout->storage.u.chunk); - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE_TAG(dset->oloc.addr) @@ -4637,8 +4637,8 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_ udata.filter_mask = filter_mask; /* Allocate the chunk (with all processes) */ - if(H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert, scaled) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk on chunk level") + if(H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert, scaled) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk on chunk level") HDassert(H5F_addr_defined(udata.chunk_block.offset)); /* Check if fill values should be written to chunks */ @@ -4677,7 +4677,7 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_ } /* end if */ /* Insert the chunk record into the index */ - if(need_insert && ops->insert) + if(need_insert && ops->insert) if((ops->insert)(&idx_info, &udata, dset) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert chunk addr into index") @@ -4756,7 +4756,7 @@ done: FUNC_LEAVE_NOAPI_TAG(ret_value) } /* end H5D__chunk_allocate() */ - + /*------------------------------------------------------------------------- * Function: H5D__chunk_update_old_edge_chunks * @@ -4837,12 +4837,12 @@ H5D__chunk_update_old_edge_chunks(H5D_t *dset, hsize_t old_dim[]) /* Calculate offset of first previously incomplete chunk in this * dimension */ - old_edge_chunk_sc[op_dim] = (old_dim[op_dim] / chunk_dim[op_dim]); + old_edge_chunk_sc[op_dim] = (old_dim[op_dim] / chunk_dim[op_dim]); /* Calculate the largest offset of chunks that might need to be * modified in this dimension */ - max_edge_chunk_sc[op_dim] = MIN((old_dim[op_dim] - 1) / chunk_dim[op_dim], - MAX((space_dim[op_dim] / chunk_dim[op_dim]), 1) - 1); + max_edge_chunk_sc[op_dim] = MIN((old_dim[op_dim] - 1) / chunk_dim[op_dim], + MAX((space_dim[op_dim] / chunk_dim[op_dim]), 1) - 1); /* Check for old_dim aligned with chunk boundary in this dimension, if * so we do not need to modify chunks along the edge in this dimension @@ -4852,7 +4852,7 @@ H5D__chunk_update_old_edge_chunks(H5D_t *dset, hsize_t old_dim[]) /* Check if the dataspace expanded enough to cause the old edge chunks * in this dimension to become full */ - if((space_dim[op_dim]/chunk_dim[op_dim]) >= (old_edge_chunk_sc[op_dim] + 1)) + if((space_dim[op_dim]/chunk_dim[op_dim]) >= (old_edge_chunk_sc[op_dim] + 1)) new_full_dim[op_dim] = TRUE; } /* end for */ @@ -4900,7 +4900,7 @@ H5D__chunk_update_old_edge_chunks(H5D_t *dset, hsize_t old_dim[]) carry = TRUE; for(i = ((int)space_ndims - 1); i >= 0; --i) { if((unsigned)i != op_dim) { - ++chunk_sc[i]; + ++chunk_sc[i]; if(chunk_sc[i] > (hsize_t) max_edge_chunk_sc[i]) chunk_sc[i] = 0; else { @@ -4928,18 +4928,18 @@ done: } /* end H5D__chunk_update_old_edge_chunks() */ #ifdef H5_HAVE_PARALLEL - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_collective_fill + * Function: H5D__chunk_collective_fill * * Purpose: Use MPIO collective write to fill the chunks (if number of - * chunks to fill is greater than the number of MPI procs; + * chunks to fill is greater than the number of MPI procs; * otherwise use independent I/O). 
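
To make the collective-fill split described above concrete: each rank takes an equal block of chunks and the remainder is spread across the lowest ranks, so no two ranks differ by more than one chunk. A sketch with assumed inputs num_chunks, mpi_rank, and mpi_size (MIN() is the usual HDF5 macro):

    size_t blocks   = num_chunks / (size_t)mpi_size; /* whole share per rank */
    size_t leftover = num_chunks % (size_t)mpi_size; /* extras for low ranks */
    size_t my_count = blocks + ((size_t)mpi_rank < leftover ? 1 : 0);
    size_t my_start = (size_t)mpi_rank * blocks + MIN((size_t)mpi_rank, leftover);
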
* - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Mohamad Chaarawi - * July 30, 2014 + * Programmer: Mohamad Chaarawi + * July 30, 2014 * *------------------------------------------------------------------------- */ @@ -4947,7 +4947,7 @@ static herr_t H5D__chunk_collective_fill(const H5D_t *dset, H5D_chunk_coll_info_t *chunk_info, size_t chunk_size, const void *fill_buf) { - MPI_Comm mpi_comm = MPI_COMM_NULL; /* MPI communicator for file */ + MPI_Comm mpi_comm = MPI_COMM_NULL; /* MPI communicator for file */ int mpi_rank = (-1); /* This process's rank */ int mpi_size = (-1); /* MPI Comm size */ int mpi_code; /* MPI return code */ @@ -5116,17 +5116,17 @@ H5D__chunk_cmp_addr(const void *addr1, const void *addr2) } /* end H5D__chunk_cmp_addr() */ #endif /* H5_HAVE_PARALLEL */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_prune_fill + * Function: H5D__chunk_prune_fill * - * Purpose: Write the fill value to the parts of the chunk that are no + * Purpose: Write the fill value to the parts of the chunk that are no * longer part of the dataspace * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Pedro Vicente, pvn@ncsa.uiuc.edu - * March 26, 2002 + * Programmer: Pedro Vicente, pvn@ncsa.uiuc.edu + * March 26, 2002 * *------------------------------------------------------------------------- */ @@ -5143,7 +5143,7 @@ H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata, hbool_t new_unfilt_chunk) hsize_t sel_nelmts; /* Number of elements in selection */ hsize_t count[H5O_LAYOUT_NDIMS]; /* Element count of hyperslab */ size_t chunk_size; /*size of a chunk */ - void *chunk; /* The file chunk */ + void *chunk; /* The file chunk */ H5D_chunk_ud_t chk_udata; /* User data for locking chunk */ uint32_t bytes_accessed; /* Bytes accessed in chunk */ unsigned u; /* Local index variable */ @@ -5242,18 +5242,18 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* H5D__chunk_prune_fill */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_prune_by_extent + * Function: H5D__chunk_prune_by_extent * - * Purpose: This function searches for chunks that are no longer necessary + * Purpose: This function searches for chunks that are no longer necessary * both in the raw data cache and in the chunk index. 
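
The "no longer necessary" test described above comes down to comparing a chunk's scaled coordinates against the new chunk count in each dimension. A sketch using the same names as the surrounding code (space_dim[], chunk_dim[], scaled[], space_ndims):

    hbool_t outside = FALSE; /* chunk lies wholly beyond the new extent? */
    unsigned u;

    for(u = 0; u < space_ndims; u++)
        /* (space_dim + chunk_dim - 1) / chunk_dim = chunks now needed, i.e.
         * the first scaled coordinate past the shrunken edge */
        if(scaled[u] >= (space_dim[u] + chunk_dim[u] - 1) / chunk_dim[u])
            outside = TRUE;
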
* - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Pedro Vicente, pvn@ncsa.uiuc.edu - * Algorithm: Robb Matzke - * March 27, 2002 + * Programmer: Pedro Vicente, pvn@ncsa.uiuc.edu + * Algorithm: Robb Matzke + * March 27, 2002 * * The algorithm is: * @@ -5355,7 +5355,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, const hsize_t *old_dim) H5D_io_info_t chk_io_info; /* Chunked I/O info object */ H5D_storage_t chk_store; /* Chunk storage information */ const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset's layout */ - const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */ + const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */ unsigned space_ndims; /* Dataset's space rank */ const hsize_t *space_dim; /* Current dataspace dimensions */ unsigned op_dim; /* Current operating dimension */ @@ -5370,7 +5370,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, const hsize_t *old_dim) uint32_t elmts_per_chunk; /* Elements in chunk */ hbool_t disable_edge_filters = FALSE; /* Whether to disable filters on partial edge chunks */ hbool_t new_unfilt_chunk = FALSE; /* Whether the chunk is newly unfiltered */ - unsigned u; /* Local index variable */ + unsigned u; /* Local index variable */ const H5O_storage_chunk_t *sc = &(layout->storage.u.chunk); herr_t ret_value = SUCCEED; /* Return value */ @@ -5404,13 +5404,13 @@ H5D__chunk_prune_by_extent(H5D_t *dset, const hsize_t *old_dim) elmts_per_chunk = 1; for(u = 0; u < space_ndims; u++) { elmts_per_chunk *= layout->u.chunk.dim[u]; - chunk_dim[u] = layout->u.chunk.dim[u]; - shrunk_dim[u] = (space_dim[u] < old_dim[u]); + chunk_dim[u] = layout->u.chunk.dim[u]; + shrunk_dim[u] = (space_dim[u] < old_dim[u]); } /* end for */ /* Create a dataspace for a chunk & set the extent */ if(NULL == (chunk_space = H5S_create_simple(space_ndims, chunk_dim, NULL))) - HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "can't create simple dataspace") + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "can't create simple dataspace") /* Reset hyperslab start array */ /* (hyperslabs will always start from origin) */ @@ -5537,7 +5537,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, const hsize_t *old_dim) carry = FALSE; while(!carry) { - int i; /* Local index variable */ + int i; /* Local index variable */ udata.common.scaled = scaled; @@ -5598,7 +5598,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, const hsize_t *old_dim) /* Remove the chunk from disk, if present */ if(H5F_addr_defined(chk_udata.chunk_block.offset)) { /* Update the offset in idx_udata */ - idx_udata.scaled = udata.common.scaled; + idx_udata.scaled = udata.common.scaled; /* Remove the chunk from disk */ if((sc->ops->remove)(&idx_info, &idx_udata) < 0) @@ -5666,16 +5666,16 @@ done: } /* end H5D__chunk_prune_by_extent() */ #ifdef H5_HAVE_PARALLEL - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_addrmap_cb + * Function: H5D__chunk_addrmap_cb * * Purpose: Callback when obtaining the chunk addresses for all existing chunks * - * Return: Success: Non-negative - * Failure: Negative + * Return: Success: Non-negative + * Failure: Negative * - * Programmer: Kent Yang + * Programmer: Kent Yang * Tuesday, November 15, 2005 * *------------------------------------------------------------------------- @@ -5683,7 +5683,7 @@ done: static int H5D__chunk_addrmap_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) { - H5D_chunk_it_ud2_t *udata = 
(H5D_chunk_it_ud2_t *)_udata; /* User data for callback */
+ H5D_chunk_it_ud2_t *udata = (H5D_chunk_it_ud2_t *)_udata; /* User data for callback */
 unsigned rank = udata->common.layout->ndims - 1; /* # of dimensions of dataset */
 hsize_t chunk_index;
@@ -5698,14 +5698,14 @@ H5D__chunk_addrmap_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
 FUNC_LEAVE_NOAPI(H5_ITER_CONT)
 } /* H5D__chunk_addrmap_cb() */

-
+
 /*-------------------------------------------------------------------------
- * Function: H5D__chunk_addrmap
+ * Function: H5D__chunk_addrmap
 *
 * Purpose: Obtain the chunk addresses for all existing chunks
 *
- * Return: Success: Non-negative on succeed.
- * Failure: negative value
+ * Return: Success: Non-negative on success.
+ * Failure: negative value
 *
 * Programmer: Kent Yang
 * November 15, 2005
@@ -5717,7 +5717,7 @@ H5D__chunk_addrmap(const H5D_io_info_t *io_info, haddr_t chunk_addr[])
 {
 H5D_chk_idx_info_t idx_info; /* Chunked index info */
 const H5D_t *dset = io_info->dset; /* Local pointer to dataset info */
- H5D_chunk_it_ud2_t udata; /* User data for iteration callback */
+ H5D_chunk_it_ud2_t udata; /* User data for iteration callback */
 H5O_storage_chunk_t *sc = &(dset->shared->layout.storage.u.chunk);
 herr_t ret_value = SUCCEED; /* Return value */

@@ -5749,16 +5749,16 @@ done:
 } /* end H5D__chunk_addrmap() */

 #endif /* H5_HAVE_PARALLEL */

-
+
 /*-------------------------------------------------------------------------
- * Function: H5D__chunk_delete
+ * Function: H5D__chunk_delete
 *
- * Purpose: Delete raw data storage for entire dataset (i.e. all chunks)
+ * Purpose: Delete raw data storage for entire dataset (i.e. all chunks)
 *
- * Return: Success: Non-negative
- * Failure: negative
+ * Return: Success: Non-negative
+ * Failure: negative
 *
- * Programmer: Quincey Koziol
+ * Programmer: Quincey Koziol
 * Thursday, March 20, 2003
 *
 *-------------------------------------------------------------------------
 */
@@ -5771,7 +5771,7 @@ H5D__chunk_delete(H5F_t *f, H5O_t *oh, H5O_storage_t *storage)
 hbool_t layout_read = FALSE; /* Whether the layout message was read from the file */
 H5O_pline_t pline; /* I/O pipeline message */
 hbool_t pline_read = FALSE; /* Whether the I/O pipeline message was read from the file */
- htri_t exists; /* Flag if header message of interest exists */
+ htri_t exists; /* Flag if header message of interest exists */
 herr_t ret_value = SUCCEED; /* Return value */

 FUNC_ENTER_PACKAGE
@@ -5826,17 +5826,17 @@ done:
 FUNC_LEAVE_NOAPI(ret_value)
 } /* end H5D__chunk_delete() */

-
+
 /*-------------------------------------------------------------------------
- * Function: H5D__chunk_update_cache
+ * Function: H5D__chunk_update_cache
 *
- * Purpose: Update any cached chunks index values after the dataspace
+ * Purpose: Update any cached chunk index values after the dataspace
 * size has changed
 *
- * Return: Success: Non-negative
- * Failure: negative
+ * Return: Success: Non-negative
+ * Failure: negative
 *
- * Programmer: Quincey Koziol
+ * Programmer: Quincey Koziol
 * Saturday, May 29, 2004
 *
 *-------------------------------------------------------------------------
 */
@@ -5844,8 +5844,8 @@ done:
herr_t
 H5D__chunk_update_cache(H5D_t *dset)
 {
- H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */
- H5D_rdcc_ent_t *ent, *next; /*cache entry */
+ H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */
+ H5D_rdcc_ent_t *ent, *next; /*cache entry */
 H5D_rdcc_ent_t tmp_head; /* Sentinel entry for temporary entry list */
 H5D_rdcc_ent_t
*tmp_tail; /* Tail pointer for temporary entry list */ herr_t ret_value = SUCCEED; /* Return value */ @@ -5866,7 +5866,7 @@ H5D__chunk_update_cache(H5D_t *dset) /* Recompute the index for each cached chunk that is in a dataset */ for(ent = rdcc->head; ent; ent = next) { - unsigned old_idx; /* Previous index number */ + unsigned old_idx; /* Previous index number */ /* Get the pointer to the next cache entry */ next = ent->next; @@ -5876,7 +5876,7 @@ H5D__chunk_update_cache(H5D_t *dset) ent->idx = H5D__chunk_hash_val(dset->shared, ent->scaled); if(old_idx != ent->idx) { - H5D_rdcc_ent_t *old_ent; /* Old cache entry */ + H5D_rdcc_ent_t *old_ent; /* Old cache entry */ /* Check if there is already a chunk at this chunk's new location */ old_ent = rdcc->slot[ent->idx]; @@ -5939,7 +5939,7 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__chunk_update_cache() */ - + /*------------------------------------------------------------------------- * Function: H5D__chunk_copy_cb * @@ -6144,8 +6144,8 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) udata->chunk_in_cache = FALSE; - udata_dst.chunk_idx = H5VM_array_offset_pre(udata_dst.common.layout->ndims - 1, - udata_dst.common.layout->max_down_chunks, udata_dst.common.scaled); + udata_dst.chunk_idx = H5VM_array_offset_pre(udata_dst.common.layout->ndims - 1, + udata_dst.common.layout->max_down_chunks, udata_dst.common.scaled); /* Allocate chunk in the file */ if(H5D__chunk_file_alloc(udata->idx_info_dst, NULL, &udata_dst.chunk_block, &need_insert, udata_dst.common.scaled) < 0) @@ -6171,17 +6171,17 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__chunk_copy_cb() */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_copy + * Function: H5D__chunk_copy * - * Purpose: Copy chunked storage from SRC file to DST file. + * Purpose: Copy chunked storage from SRC file to DST file. * - * Return: Success: Non-negative - * Failure: negative + * Return: Success: Non-negative + * Failure: negative * * Programmer: Peter Cao - * August 20, 2005 + * August 20, 2005 * *------------------------------------------------------------------------- */ @@ -6455,7 +6455,7 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__chunk_copy() */ - + /*------------------------------------------------------------------------- * Function: H5D__chunk_bh_info * @@ -6534,18 +6534,18 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__chunk_bh_info() */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_dump_index_cb + * Function: H5D__chunk_dump_index_cb * - * Purpose: If the UDATA.STREAM member is non-null then debugging + * Purpose: If the UDATA.STREAM member is non-null then debugging * information is written to that stream. 
* - * Return: Success: Non-negative + * Return: Success: Non-negative * - * Failure: Negative + * Failure: Negative * - * Programmer: Robb Matzke + * Programmer: Robb Matzke * Wednesday, April 21, 1999 * *------------------------------------------------------------------------- @@ -6553,7 +6553,7 @@ done: static int H5D__chunk_dump_index_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) { - H5D_chunk_it_ud4_t *udata = (H5D_chunk_it_ud4_t *)_udata; /* User data from caller */ + H5D_chunk_it_ud4_t *udata = (H5D_chunk_it_ud4_t *)_udata; /* User data from caller */ FUNC_ENTER_STATIC_NOERR @@ -6579,17 +6579,17 @@ H5D__chunk_dump_index_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) FUNC_LEAVE_NOAPI(H5_ITER_CONT) } /* H5D__chunk_dump_index_cb() */ - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_dump_index + * Function: H5D__chunk_dump_index * - * Purpose: Prints information about the storage index to the specified - * stream. + * Purpose: Prints information about the storage index to the specified + * stream. * - * Return: Success: Non-negative - * Failure: negative + * Return: Success: Non-negative + * Failure: negative * - * Programmer: Robb Matzke + * Programmer: Robb Matzke * Wednesday, April 28, 1999 * *------------------------------------------------------------------------- @@ -6637,17 +6637,17 @@ done: } /* end H5D__chunk_dump_index() */ #ifdef H5D_CHUNK_DEBUG - + /*------------------------------------------------------------------------- - * Function: H5D__chunk_stats + * Function: H5D__chunk_stats * - * Purpose: Print raw data cache statistics to the debug stream. If - * HEADERS is non-zero then print table column headers, - * otherwise assume that the H5AC layer has already printed them. + * Purpose: Print raw data cache statistics to the debug stream. If + * HEADERS is non-zero then print table column headers, + * otherwise assume that the H5AC layer has already printed them. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Robb Matzke + * Programmer: Robb Matzke * Thursday, May 21, 1998 * *------------------------------------------------------------------------- @@ -6655,9 +6655,9 @@ done: herr_t H5D__chunk_stats(const H5D_t *dset, hbool_t headers) { - H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); - double miss_rate; - char ascii[32]; + H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); + double miss_rate; + char ascii[32]; herr_t ret_value=SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE_NOERR @@ -6701,22 +6701,22 @@ done: } /* end H5D__chunk_stats() */ #endif /* H5D_CHUNK_DEBUG */ - + /*------------------------------------------------------------------------- - * Function: H5D__nonexistent_readvv_cb + * Function: H5D__nonexistent_readvv_cb * - * Purpose: Callback operation for performing fill value I/O operation + * Purpose: Callback operation for performing fill value I/O operation * on memory buffer. * - * Note: This algorithm is pretty inefficient about initializing and + * Note: This algorithm is pretty inefficient about initializing and * terminating the fill buffer info structure and it would be * faster to refactor this into a "real" initialization routine, * and a "vectorized fill" routine. 
-QAK * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol - * 30 Sep 2010 + * Programmer: Quincey Koziol + * 30 Sep 2010 * *------------------------------------------------------------------------- */ @@ -6750,24 +6750,24 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* H5D__nonexistent_readvv_cb() */ - + /*------------------------------------------------------------------------- - * Function: H5D__nonexistent_readvv + * Function: H5D__nonexistent_readvv * - * Purpose: When the chunk doesn't exist on disk and the chunk is bigger + * Purpose: When the chunk doesn't exist on disk and the chunk is bigger * than the cache size, performs fill value I/O operation on * memory buffer, advancing through two I/O vectors, until one * runs out. * - * Note: This algorithm is pretty inefficient about initializing and + * Note: This algorithm is pretty inefficient about initializing and * terminating the fill buffer info structure and it would be * faster to refactor this into a "real" initialization routine, * and a "vectorized fill" routine. -QAK * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Raymond Lu - * 6 Feb 2009 + * Programmer: Raymond Lu + * 6 Feb 2009 * *------------------------------------------------------------------------- */ @@ -6804,7 +6804,7 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* H5D__nonexistent_readvv() */ - + /*------------------------------------------------------------------------- * Function: H5D__chunk_is_partial_edge_chunk * @@ -6843,14 +6843,14 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* H5D__chunk_is_partial_edge_chunk() */ - + /*------------------------------------------------------------------------- * Function: H5D__chunk_file_alloc() * - * Purpose: Chunk allocation: - * Create the chunk if it doesn't exist, or reallocate the + * Purpose: Chunk allocation: + * Create the chunk if it doesn't exist, or reallocate the * chunk if its size changed. - * The coding is moved and modified from each index structure. + * The coding is moved and modified from each index structure. 
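The reallocation logic in H5D__chunk_file_alloc exists because pipeline filters such as deflate make a chunk's on-disk size vary from write to write. A minimal sketch of creating a dataset whose chunks exercise that path — hypothetical names, error checks elided:

    #include "hdf5.h"

    hid_t create_filtered_chunked(hid_t file)
    {
        hsize_t dims[2]  = {1024, 1024};
        hsize_t chunk[2] = {64, 64};
        hid_t   space    = H5Screate_simple(2, dims, NULL);
        hid_t   dcpl     = H5Pcreate(H5P_DATASET_CREATE);
        hid_t   dset;

        H5Pset_chunk(dcpl, 2, chunk);   /* chunked layout, 64x64 elements */
        H5Pset_deflate(dcpl, 6);        /* gzip level 6: stored chunk sizes now vary */

        dset = H5Dcreate2(file, "filtered", H5T_NATIVE_INT, space,
                          H5P_DEFAULT, dcpl, H5P_DEFAULT);

        H5Pclose(dcpl);
        H5Sclose(space);
        return dset; /* each rewrite of a chunk may land at a new file address */
    }
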
* * Return: Non-negative on success/Negative on failure * @@ -6862,8 +6862,8 @@ herr_t H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old_chunk, H5F_block_t *new_chunk, hbool_t *need_insert, const hsize_t *scaled) { - hbool_t alloc_chunk = FALSE; /* Whether to allocate chunk */ - herr_t ret_value = SUCCEED; /* Return value */ + hbool_t alloc_chunk = FALSE; /* Whether to allocate chunk */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC @@ -6881,7 +6881,7 @@ H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old /* Check for filters on chunks */ if(idx_info->pline->nused > 0) { /* Sanity/error checking block */ - HDassert(idx_info->storage->idx_type != H5D_CHUNK_IDX_NONE); + HDassert(idx_info->storage->idx_type != H5D_CHUNK_IDX_NONE); { unsigned allow_chunk_size_len; /* Allowed size of encoded chunk size */ unsigned new_chunk_size_len; /* Size of encoded chunk size */ @@ -6931,9 +6931,9 @@ H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old } /* end else */ } /* end if */ else { - HDassert(!H5F_addr_defined(new_chunk->offset)); - HDassert(new_chunk->length == idx_info->layout->size); - alloc_chunk = TRUE; + HDassert(!H5F_addr_defined(new_chunk->offset)); + HDassert(new_chunk->length == idx_info->layout->size); + alloc_chunk = TRUE; } /* end else */ /* Actually allocate space for the chunk in the file */ @@ -6973,11 +6973,11 @@ H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old HDassert(H5F_addr_defined(new_chunk->offset)); -done: +done: FUNC_LEAVE_NOAPI(ret_value) } /* H5D__chunk_file_alloc() */ - + /*------------------------------------------------------------------------- * Function: H5D__chunk_format_convert_cb * @@ -6995,13 +6995,13 @@ done: static int H5D__chunk_format_convert_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) { - H5D_chunk_it_ud5_t *udata = (H5D_chunk_it_ud5_t *)_udata; /* User data */ - H5D_chk_idx_info_t *new_idx_info; /* The new chunk index information */ - H5D_chunk_ud_t insert_udata; /* Chunk information to be inserted */ - haddr_t chunk_addr; /* Chunk address */ - size_t nbytes; /* Chunk size */ - void *buf = NULL; /* Pointer to buffer of chunk data */ - int ret_value = H5_ITER_CONT; /* Return value */ + H5D_chunk_it_ud5_t *udata = (H5D_chunk_it_ud5_t *)_udata; /* User data */ + H5D_chk_idx_info_t *new_idx_info; /* The new chunk index information */ + H5D_chunk_ud_t insert_udata; /* Chunk information to be inserted */ + haddr_t chunk_addr; /* Chunk address */ + size_t nbytes; /* Chunk size */ + void *buf = NULL; /* Pointer to buffer of chunk data */ + int ret_value = H5_ITER_CONT; /* Return value */ FUNC_ENTER_STATIC @@ -7019,7 +7019,7 @@ H5D__chunk_format_convert_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) unsigned filter_mask = chunk_rec->filter_mask; H5Z_cb_t filter_cb; /* Filter failure callback struct */ - size_t read_size = nbytes; /* Bytes to read */ + size_t read_size = nbytes; /* Bytes to read */ HDassert(read_size == new_idx_info->layout->size); @@ -7074,12 +7074,12 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* H5D__chunk_format_convert_cb() */ - + /*------------------------------------------------------------------------- * Function: H5D__chunk_format_convert * * Purpose: Iterate over the chunks for the current chunk index and insert the - * the chunk addresses into v1 B-tree chunk index via callback. + * the chunk addresses into v1 B-tree chunk index via callback. 
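H5D__chunk_format_convert backs the public H5Dformat_convert entry point; paired with H5Dget_chunk_index_type (whose prototype this patch also touches), the downgrade flow looks roughly like this sketch (error handling abbreviated):

    #include "hdf5.h"

    /* Downgrade a dataset's chunk index to the version-1 B-tree so that
     * pre-1.10 readers can open the file. */
    int downgrade_chunk_index(hid_t dset)
    {
        H5D_chunk_index_t idx_type;

        if (H5Dget_chunk_index_type(dset, &idx_type) < 0)
            return -1;

        if (idx_type != H5D_CHUNK_IDX_BTREE)
            if (H5Dformat_convert(dset) < 0) /* drives H5D__chunk_format_convert */
                return -1;

        return 0;
    }
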
* * Return: Non-negative on success/Negative on failure * @@ -7091,7 +7091,7 @@ done: herr_t H5D__chunk_format_convert(H5D_t *dset, H5D_chk_idx_info_t *idx_info, H5D_chk_idx_info_t *new_idx_info) { - H5D_chunk_it_ud5_t udata; /* User data */ + H5D_chunk_it_ud5_t udata; /* User data */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -7106,13 +7106,13 @@ H5D__chunk_format_convert(H5D_t *dset, H5D_chk_idx_info_t *idx_info, H5D_chk_idx /* Iterate over the chunks in the current index and insert the chunk addresses into version 1 B-tree index */ if((idx_info->storage->ops->iterate)(idx_info, H5D__chunk_format_convert_cb, &udata) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_BADITER, FAIL, "unable to iterate over chunk index to chunk info") + HGOTO_ERROR(H5E_DATASET, H5E_BADITER, FAIL, "unable to iterate over chunk index to chunk info") done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__chunk_format_convert() */ - + /*------------------------------------------------------------------------- * Function: H5D__get_num_chunks_cb * @@ -7144,7 +7144,7 @@ H5D__get_num_chunks_cb(const H5D_chunk_rec_t H5_ATTR_UNUSED *chunk_rec, void *_u FUNC_LEAVE_NOAPI(ret_value) } /* H5D__get_num_chunks_cb() */ - + /*------------------------------------------------------------------------- * Function: H5D__get_num_chunks * @@ -7206,7 +7206,7 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__get_num_chunks() */ - + /*------------------------------------------------------------------------- * Function: H5D__get_chunk_info_cb * @@ -7255,7 +7255,7 @@ H5D__get_chunk_info_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) FUNC_LEAVE_NOAPI(ret_value) } /* H5D__get_chunk_info_cb() */ - + /*------------------------------------------------------------------------- * Function: H5D__get_chunk_info * @@ -7344,7 +7344,7 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__get_chunk_info() */ - + /*------------------------------------------------------------------------- * Function: H5D__get_chunk_info_by_coord_cb * @@ -7392,7 +7392,7 @@ H5D__get_chunk_info_by_coord_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) FUNC_LEAVE_NOAPI(ret_value) } /* H5D__get_chunk_info_by_coord_cb() */ - + /*------------------------------------------------------------------------- * Function: H5D__get_chunk_info_by_coord * diff --git a/src/H5Dcontig.c b/src/H5Dcontig.c index e48c3b3..655f660 100644 --- a/src/H5Dcontig.c +++ b/src/H5Dcontig.c @@ -843,7 +843,7 @@ H5D__contig_readvv_sieve_cb(hsize_t dst_off, hsize_t src_off, size_t len, * the end of the data element, and don't read more than * the buffer size. */ - min = MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size); + min = MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size); H5_CHECKED_ASSIGN(dset_contig->sieve_size, size_t, min, hsize_t); /* Read the new sieve buffer */ @@ -1038,7 +1038,7 @@ H5D__contig_writevv_sieve_cb(hsize_t dst_off, hsize_t src_off, size_t len, max_data = store_contig->dset_size - dst_off; /* Compute the size of the sieve buffer */ - min = MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size); + min = MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size); H5_CHECKED_ASSIGN(dset_contig->sieve_size, size_t, min, hsize_t); /* Check if there is any point in reading the data from the file */ @@ -1154,7 +1154,7 @@ H5D__contig_writevv_sieve_cb(hsize_t dst_off, hsize_t src_off, size_t len, * the end of the data element, and don't read more than * the buffer size. 
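The H5D__get_num_chunks / H5D__get_chunk_info internals patched above sit under the public H5Dget_num_chunks and H5Dget_chunk_info calls. A short usage sketch (error handling abbreviated):

    #include <stdio.h>
    #include "hdf5.h"

    int list_chunks(hid_t dset)
    {
        hsize_t nchunks = 0, i;

        if (H5Dget_num_chunks(dset, H5S_ALL, &nchunks) < 0)
            return -1;

        for (i = 0; i < nchunks; i++) {
            hsize_t  offset[H5S_MAX_RANK]; /* logical chunk origin, in elements */
            unsigned filter_mask;
            haddr_t  addr;
            hsize_t  size;

            if (H5Dget_chunk_info(dset, H5S_ALL, i, offset, &filter_mask,
                                  &addr, &size) < 0)
                return -1;
            printf("chunk %llu: addr=%llu size=%llu bytes\n",
                   (unsigned long long)i, (unsigned long long)addr,
                   (unsigned long long)size);
        }
        return 0;
    }
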
*/ - min = MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size); + min = MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size); H5_CHECKED_ASSIGN(dset_contig->sieve_size, size_t, min, hsize_t); /* Check if there is any point in reading the data from the file */ diff --git a/src/H5Dearray.c b/src/H5Dearray.c index b23ac46..eaa8c46 100644 --- a/src/H5Dearray.c +++ b/src/H5Dearray.c @@ -615,7 +615,7 @@ H5D__earray_filt_debug(FILE *stream, int indent, int fwidth, hsize_t idx, * Failure: NULL * * Programmer: Vailin Choi; July 2010 - * + * *------------------------------------------------------------------------- */ static void * @@ -933,7 +933,7 @@ H5D__earray_idx_create(const H5D_chk_idx_info_t *idx_info) /* General parameters */ if(idx_info->pline->nused > 0) { unsigned chunk_size_len; /* Size of encoded chunk size */ - + /* Compute the size required for encoding the size of a chunk, allowing * for an extra byte, in case the filter makes the chunk larger. */ @@ -1349,7 +1349,7 @@ H5D__earray_idx_iterate(const H5D_chk_idx_info_t *idx_info, } /* end if */ udata.cb = chunk_cb; udata.udata = chunk_udata; - + /* Iterate over the extensible array elements */ if((ret_value = H5EA_iterate(ea, H5D__earray_idx_iterate_cb, &udata)) < 0) HERROR(H5E_DATASET, H5E_BADITER, "unable to iterate over fixed array chunk index"); diff --git a/src/H5Dfarray.c b/src/H5Dfarray.c index 2d85e3b..cbaa52d 100644 --- a/src/H5Dfarray.c +++ b/src/H5Dfarray.c @@ -14,8 +14,8 @@ /* Programmer: Vailin Choi * Thursday, April 30, 2009 * - * Purpose: Fixed array indexed (chunked) I/O functions. - * The chunk coordinate is mapped as an index into an array of + * Purpose: Fixed array indexed (chunked) I/O functions. + * The chunk coordinate is mapped as an index into an array of * disk addresses for the chunks. 
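The coordinate-to-slot mapping described above is plain row-major linearization. A hypothetical stand-alone helper (not the library's actual code) showing the arithmetic:

    #include <stddef.h>

    /* Row-major linearization of a chunk's scaled coordinates
     * (element offset divided by chunk dims), mirroring how a fixed-array
     * index locates the slot holding that chunk's disk address. */
    static size_t chunk_linear_index(unsigned ndims, const size_t *scaled,
                                     const size_t *nchunks_per_dim)
    {
        size_t   idx = 0;
        unsigned u;

        for (u = 0; u < ndims; u++)
            idx = idx * nchunks_per_dim[u] + scaled[u];

        return idx; /* slot in the flat array of chunk addresses */
    }
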
* */ @@ -910,7 +910,7 @@ H5D__farray_idx_create(const H5D_chk_idx_info_t *idx_info) /* Set up the user data */ udata.f = idx_info->f; udata.chunk_size = idx_info->layout->size; - + /* Create the fixed array for the chunk index */ if(NULL == (idx_info->storage->u.farray.fa = H5FA_create(idx_info->f, &cparam, &udata))) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't create fixed array") @@ -961,7 +961,7 @@ H5D__farray_idx_is_space_alloc(const H5O_storage_chunk_t *storage) * Return: Non-negative on success/Negative on failure * * Programmer: Vailin Choi; 5 May 2014 - * + * *------------------------------------------------------------------------- */ static herr_t @@ -1224,7 +1224,7 @@ H5D__farray_idx_iterate(const H5D_chk_idx_info_t *idx_info, } /* end if */ udata.cb = chunk_cb; udata.udata = chunk_udata; - + /* Iterate over the fixed array elements */ if((ret_value = H5FA_iterate(fa, H5D__farray_idx_iterate_cb, &udata)) < 0) HERROR(H5E_DATASET, H5E_BADITER, "unable to iterate over fixed array chunk index"); diff --git a/src/H5Dfill.c b/src/H5Dfill.c index f5a5238..94c7b1c 100644 --- a/src/H5Dfill.c +++ b/src/H5Dfill.c @@ -565,7 +565,7 @@ done: */ herr_t H5D__fill_refill_vl(H5D_fill_buf_info_t *fb_info, size_t nelmts) -{ +{ herr_t ret_value = SUCCEED; /* Return value */ void * buf = NULL; /* Temporary fill buffer */ diff --git a/src/H5Dint.c b/src/H5Dint.c index 4750ad6..9f7e004 100644 --- a/src/H5Dint.c +++ b/src/H5Dint.c @@ -1418,6 +1418,7 @@ done: HDONE_ERROR(H5E_DATASET, H5E_CANTRESET, NULL, "unable to reset external file list info") if(new_dset->shared->space && H5S_close(new_dset->shared->space) < 0) HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, NULL, "unable to release dataspace") + if(new_dset->shared->type) { if(new_dset->shared->type_id > 0) { if(H5I_dec_ref(new_dset->shared->type_id) < 0) @@ -1428,6 +1429,7 @@ done: HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, NULL, "unable to release datatype") } /* end else */ } /* end if */ + if(H5F_addr_defined(new_dset->oloc.addr)) { if(H5O_dec_rc_by_loc(&(new_dset->oloc)) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, NULL, "unable to decrement refcount on newly created object") diff --git a/src/H5Dio.c b/src/H5Dio.c index 3d49df7..84b7e7a 100644 --- a/src/H5Dio.c +++ b/src/H5Dio.c @@ -321,7 +321,7 @@ done: /*------------------------------------------------------------------------- * Function: H5Dwrite_chunk * - * Purpose: Writes an entire chunk to the file directly. + * Purpose: Writes an entire chunk to the file directly. 
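H5Dwrite_chunk stores a caller-prepared chunk as-is, skipping datatype conversion and the filter pipeline; note in the hunk below that data_size is currently limited to 32 bits. A minimal sketch — assumes a filtered chunked dataset opened elsewhere, with zbuf already filtered exactly as the pipeline would have produced it:

    #include <stdint.h>
    #include "hdf5.h"

    int write_raw_chunk(hid_t dset, const void *zbuf, size_t zlen)
    {
        hsize_t  offset[2] = {0, 0}; /* logical origin of the chunk, in elements */
        uint32_t filter_mask = 0;    /* 0 = every pipeline filter was applied */

        /* zbuf/zlen go to disk verbatim; the index records filter_mask */
        return (int)H5Dwrite_chunk(dset, H5P_DEFAULT, filter_mask,
                                   offset, zlen, zbuf);
    }
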
* * Return: Non-negative on success/Negative on failure * @@ -331,13 +331,13 @@ done: *------------------------------------------------------------------------- */ herr_t -H5Dwrite_chunk(hid_t dset_id, hid_t dxpl_id, uint32_t filters, const hsize_t *offset, +H5Dwrite_chunk(hid_t dset_id, hid_t dxpl_id, uint32_t filters, const hsize_t *offset, size_t data_size, const void *buf) { H5VL_object_t *vol_obj = NULL; uint32_t data_size_32; /* Chunk data size (limited to 32-bits currently) */ herr_t ret_value = SUCCEED; /* Return value */ - + FUNC_ENTER_API(FAIL) H5TRACE6("e", "iiIu*hz*x", dset_id, dxpl_id, filters, offset, data_size, buf); diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c index 91557c3..492902b 100644 --- a/src/H5Dmpio.c +++ b/src/H5Dmpio.c @@ -364,13 +364,13 @@ H5D__mpio_opt_possible(const H5D_io_info_t *io_info, const H5S_t *file_space, /* Check to see if the process is reading the entire dataset */ if(H5S_GET_SELECT_TYPE(file_space) != H5S_SEL_ALL) - local_cause[1] |= H5D_MPIO_RANK0_NOT_H5S_ALL; + local_cause[1] |= H5D_MPIO_RANK0_NOT_H5S_ALL; /* Only perform this optimization for contigous datasets, currently */ else if(H5D_CONTIGUOUS != io_info->dset->shared->layout.type) - /* Flag to do a MPI_Bcast of the data from one proc instead of + /* Flag to do a MPI_Bcast of the data from one proc instead of * having all the processes involved in the collective I/O. */ - local_cause[1] |= H5D_MPIO_RANK0_NOT_CONTIGUOUS; + local_cause[1] |= H5D_MPIO_RANK0_NOT_CONTIGUOUS; else if((is_vl_storage = H5T_is_vl_storage(type_info->dset_type)) < 0) local_cause[0] |= H5D_MPIO_ERROR_WHILE_CHECKING_COLLECTIVE_POSSIBLE; else if(is_vl_storage) @@ -394,7 +394,7 @@ H5D__mpio_opt_possible(const H5D_io_info_t *io_info, const H5S_t *file_space, dset_size = ((hsize_t)snelmts) * type_size; /* If the size of the dataset is less than 2GB then do an MPI_Bcast - * of the data from one process instead of having all the processes + * of the data from one process instead of having all the processes * involved in the collective I/O. */ if(dset_size > ((hsize_t)(2.0F * H5_GB) - 1)) @@ -402,7 +402,7 @@ H5D__mpio_opt_possible(const H5D_io_info_t *io_info, const H5S_t *file_space, } /* end else */ } /* end else */ } /* end else */ - + /* Check for independent I/O */ if(local_cause[0] & H5D_MPIO_SET_INDEPENDENT) global_cause[0] = local_cause[0]; @@ -812,7 +812,7 @@ H5D__chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf io_option = H5D_ONE_LINK_CHUNK_IO; /*no opt*/ /* direct request to multi-chunk-io */ else if(H5FD_MPIO_CHUNK_MULTI_IO == chunk_opt_mode) - io_option = H5D_MULTI_CHUNK_IO; + io_option = H5D_MULTI_CHUNK_IO; /* via default path. branch by num threshold */ else { unsigned one_link_chunk_io_threshold; /* Threshold to use single collective I/O for all chunks */ @@ -1123,9 +1123,9 @@ if(H5DEBUG(D)) /* Obtain MPI derived datatype from all individual chunks */ for(u = 0; u < num_chunk; u++) { - hsize_t *permute_map = NULL; /* array that holds the mapping from the old, - out-of-order displacements to the in-order - displacements of the MPI datatypes of the + hsize_t *permute_map = NULL; /* array that holds the mapping from the old, + out-of-order displacements to the in-order + displacements of the MPI datatypes of the point selection of the file space */ hbool_t is_permuted = FALSE; @@ -1135,8 +1135,8 @@ if(H5DEBUG(D)) * where it will be freed. 
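The H5D_MPIO_RANK0_* causes above gate an optimization: for a contiguous dataset under 2 GB, rank 0 reads and broadcasts rather than all ranks performing collective I/O. Reduced to plain MPI the pattern is roughly as follows — read_fn is a hypothetical stand-in for the library's internal read:

    #include <mpi.h>

    int read_then_bcast(void *buf, int nbytes, MPI_Comm comm,
                        int (*read_fn)(void *buf))
    {
        int rank, ret = 0;

        MPI_Comm_rank(comm, &rank);
        if (rank == 0)
            ret = read_fn(buf);               /* only rank 0 touches the file */
        MPI_Bcast(&ret, 1, MPI_INT, 0, comm); /* agree on success first */
        if (ret == 0)
            MPI_Bcast(buf, nbytes, MPI_BYTE, 0, comm);
        return ret;
    }
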
*/ if(H5S_mpio_space_type(chunk_addr_info_array[u].chunk_info.fspace, - type_info->src_type_size, - &chunk_ftype[u], /* OUT: datatype created */ + type_info->src_type_size, + &chunk_ftype[u], /* OUT: datatype created */ &chunk_mpi_file_counts[u], /* OUT */ &(chunk_mft_is_derived_array[u]), /* OUT */ TRUE, /* this is a file space, @@ -1154,9 +1154,9 @@ if(H5DEBUG(D)) if(is_permuted) HDassert(permute_map); if(H5S_mpio_space_type(chunk_addr_info_array[u].chunk_info.mspace, - type_info->dst_type_size, &chunk_mtype[u], - &chunk_mpi_mem_counts[u], - &(chunk_mbt_is_derived_array[u]), + type_info->dst_type_size, &chunk_mtype[u], + &chunk_mpi_mem_counts[u], + &(chunk_mbt_is_derived_array[u]), FALSE, /* this is a memory space, so if the file space is not @@ -2016,9 +2016,9 @@ H5D__inter_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf if((file_space != NULL) && (mem_space != NULL)) { int mpi_file_count; /* Number of file "objects" to transfer */ - hsize_t *permute_map = NULL; /* array that holds the mapping from the old, - out-of-order displacements to the in-order - displacements of the MPI datatypes of the + hsize_t *permute_map = NULL; /* array that holds the mapping from the old, + out-of-order displacements to the in-order + displacements of the MPI datatypes of the point selection of the file space */ hbool_t is_permuted = FALSE; @@ -2027,8 +2027,8 @@ H5D__inter_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf * and will be fed into the next call to H5S_mpio_space_type * where it will be freed. */ - if(H5S_mpio_space_type(file_space, type_info->src_type_size, - &mpi_file_type, &mpi_file_count, &mft_is_derived, /* OUT: datatype created */ + if(H5S_mpio_space_type(file_space, type_info->src_type_size, + &mpi_file_type, &mpi_file_count, &mft_is_derived, /* OUT: datatype created */ TRUE, /* this is a file space, so permute the datatype if the point selection is out of @@ -2037,13 +2037,13 @@ H5D__inter_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf the permutation of points selected in case they are out of - order */ + order */ &is_permuted /* OUT */) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI file type") /* Sanity check */ if(is_permuted) HDassert(permute_map); - if(H5S_mpio_space_type(mem_space, type_info->src_type_size, + if(H5S_mpio_space_type(mem_space, type_info->src_type_size, &mpi_buf_type, &mpi_buf_count, &mbt_is_derived, /* OUT: datatype created */ FALSE, /* this is a memory space, so if the file space is not @@ -2055,7 +2055,7 @@ H5D__inter_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf generated by the file_space selection and applied to the - memory selection */, + memory selection */, &is_permuted /* IN */) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI buffer type") /* Sanity check */ @@ -2627,7 +2627,7 @@ H5D__construct_filtered_io_info_list(const H5D_io_info_t *io_info, const H5D_typ local_info_array[i].num_writers = 0; local_info_array[i].owners.original_owner = local_info_array[i].owners.new_owner = mpi_rank; local_info_array[i].buf = NULL; - + local_info_array[i].async_info.num_receive_requests = 0; local_info_array[i].async_info.receive_buffer_array = NULL; local_info_array[i].async_info.receive_requests_array = NULL; diff --git a/src/H5Dnone.c b/src/H5Dnone.c index 2093512..5ba3f5b 100644 --- a/src/H5Dnone.c +++ b/src/H5Dnone.c @@ -14,7 +14,7 @@ /* Programmer: Vailin Choi * September 2010 * - * Purpose: Implicit (Non Index) chunked I/O 
functions. + * Purpose: Implicit (Non Index) chunked I/O functions. * This is used when the dataset is: * extendible but with fixed max. dims * with early allocation @@ -114,8 +114,8 @@ const H5D_chunk_ops_t H5D_COPS_NONE[1] = {{ * Function: H5D__none_idx_create * * Purpose: Allocate memory for the maximum # of chunks in the dataset. - * - * Return: Non-negative on success + * + * Return: Non-negative on success * Negative on failure. * * Programmer: Vailin Choi; September 2010 @@ -472,7 +472,7 @@ H5D__none_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_addr) /*------------------------------------------------------------------------- * Function: H5D__none_idx_dump * - * Purpose: Dump + * Purpose: Dump * * Return: Non-negative on success/Negative on failure * diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h index 281da81..b6ec06d 100644 --- a/src/H5Dpublic.h +++ b/src/H5Dpublic.h @@ -150,7 +150,7 @@ H5_DLL herr_t H5Dread(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, hid_t plist_id, void *buf/*out*/); H5_DLL herr_t H5Dwrite(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, hid_t plist_id, const void *buf); -H5_DLL herr_t H5Dwrite_chunk(hid_t dset_id, hid_t dxpl_id, uint32_t filters, +H5_DLL herr_t H5Dwrite_chunk(hid_t dset_id, hid_t dxpl_id, uint32_t filters, const hsize_t *offset, size_t data_size, const void *buf); H5_DLL herr_t H5Dread_chunk(hid_t dset_id, hid_t dxpl_id, const hsize_t *offset, uint32_t *filters, void *buf); @@ -193,7 +193,7 @@ H5_DLL herr_t H5Dget_chunk_index_type(hid_t did, H5D_chunk_index_t *idx_type); #define H5D_XFER_DIRECT_CHUNK_READ_FLAG_NAME "direct_chunk_read_flag" #define H5D_XFER_DIRECT_CHUNK_READ_OFFSET_NAME "direct_chunk_read_offset" #define H5D_XFER_DIRECT_CHUNK_READ_FILTERS_NAME "direct_chunk_read_filters" - + /* Typedefs */ diff --git a/src/H5Dsingle.c b/src/H5Dsingle.c index 8efa771..33274bb 100644 --- a/src/H5Dsingle.c +++ b/src/H5Dsingle.c @@ -14,7 +14,7 @@ /* Programmer: Vailin Choi * May 2011; updated 10/2015 * - * Purpose: Single Chunk I/O functions. + * Purpose: Single Chunk I/O functions. * This is used when the dataset has only 1 chunk (with or without filter): * cur_dims[] is equal to max_dims[] is equal to the chunk dims[] * non-filter chunk record: [address of the chunk] @@ -150,8 +150,8 @@ H5D__single_idx_init(const H5D_chk_idx_info_t *idx_info, * Function: H5D__single_idx_create * * Purpose: Set up Single Chunk Index: filtered or non-filtered - * - * Return: Non-negative on success + * + * Return: Non-negative on success * Negative on failure. * * Programmer: Vailin Choi; July 2011 diff --git a/src/H5E.c b/src/H5E.c index 07eaf18..f204864 100644 --- a/src/H5E.c +++ b/src/H5E.c @@ -364,7 +364,7 @@ H5E__get_stack(void) /* No associated value with current thread - create one */ #ifdef H5_HAVE_WIN_THREADS /* Win32 has to use LocalAlloc to match the LocalFree in DllMain */ - estack = (H5E_t *)LocalAlloc(LPTR, sizeof(H5E_t)); + estack = (H5E_t *)LocalAlloc(LPTR, sizeof(H5E_t)); #else /* Use HDmalloc here since this has to match the HDfree in the * destructor and we want to avoid the codestack there. 
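On the H5Dread_chunk prototype in the H5Dpublic.h hunk above: it returns a chunk exactly as stored on disk, still filtered. A minimal sketch pairing it with H5Dget_chunk_storage_size (error handling abbreviated):

    #include <stdlib.h>
    #include "hdf5.h"

    /* Read one chunk in its raw, still-filtered form; caller frees the buffer.
     * On return, *filters says which pipeline filters remain applied. */
    void *read_raw_chunk(hid_t dset, const hsize_t *offset, uint32_t *filters,
                         hsize_t *nbytes)
    {
        void *buf;

        if (H5Dget_chunk_storage_size(dset, offset, nbytes) < 0)
            return NULL;
        if (NULL == (buf = malloc((size_t)*nbytes)))
            return NULL;
        if (H5Dread_chunk(dset, H5P_DEFAULT, offset, filters, buf) < 0) {
            free(buf);
            return NULL;
        }
        return buf;
    }
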
@@ -1045,7 +1045,7 @@ done: * * Programmer: Raymond Lu * Friday, July 15, 2003 - * + * *------------------------------------------------------------------------- */ herr_t diff --git a/src/H5EAcache.c b/src/H5EAcache.c index 1d182a4..affa127 100644 --- a/src/H5EAcache.c +++ b/src/H5EAcache.c @@ -117,7 +117,7 @@ static herr_t H5EA__cache_dblk_page_get_initial_load_size(void *udata, size_t *i static htri_t H5EA__cache_dblk_page_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr); static void *H5EA__cache_dblk_page_deserialize(const void *image, size_t len, void *udata, hbool_t *dirty); -static herr_t H5EA__cache_dblk_page_image_len(const void *thing, +static herr_t H5EA__cache_dblk_page_image_len(const void *thing, size_t *image_len); static herr_t H5EA__cache_dblk_page_serialize(const H5F_t *f, void *image, size_t len, void *thing); @@ -1467,12 +1467,12 @@ H5EA__cache_dblock_get_initial_load_size(void *_udata, size_t *image_len)) HDmemset(&dblock, 0, sizeof(dblock)); /* need to set: - * + * * dblock.hdr * dblock.npages * dblock.nelmts * - * before we invoke either H5EA_DBLOCK_PREFIX_SIZE() or + * before we invoke either H5EA_DBLOCK_PREFIX_SIZE() or * H5EA_DBLOCK_SIZE(). */ dblock.hdr = udata->hdr; @@ -1564,7 +1564,7 @@ H5EA__cache_dblock_deserialize(const void *_image, size_t H5_ATTR_NDEBUG_UNUSED if(NULL == (dblock = H5EA__dblock_alloc(udata->hdr, udata->parent, udata->nelmts))) H5E_THROW(H5E_CANTALLOC, "memory allocation failed for extensible array data block") - HDassert(((!dblock->npages) && (len == H5EA_DBLOCK_SIZE(dblock))) || + HDassert(((!dblock->npages) && (len == H5EA_DBLOCK_SIZE(dblock))) || (len == H5EA_DBLOCK_PREFIX_SIZE(dblock))); /* Set the extensible array data block's information */ diff --git a/src/H5EApkg.h b/src/H5EApkg.h index 0adcde2..bb3f39c 100644 --- a/src/H5EApkg.h +++ b/src/H5EApkg.h @@ -219,10 +219,10 @@ typedef struct H5EA_hdr_t { * of the extensible array header. * * The field is used to avoid duplicate - * setups of the flush dependency - * relationship, and to allow the + * setups of the flush dependency + * relationship, and to allow the * extensible array header to destroy - * the flush dependency on receipt of + * the flush dependency on receipt of * an eviction notification from the * metadata cache. 
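On the flush-dependency comment in the H5EApkg.h hunk below: a self-contained sketch of the set-up-once / tear-down-on-evict pattern, using stand-in types — the real code is the header's cache notify callback against the internal H5AC flush-dependency API and differs in detail:

    /* Stand-in types; the library's own are H5AC_notify_action_t etc. */
    typedef enum { ACTION_OTHER, ACTION_BEFORE_EVICT } notify_action_t;

    typedef struct ea_hdr_t {
        void *fd_parent; /* recorded when the dependency is set up; NULL if none */
    } ea_hdr_t;

    static int destroy_flush_dependency(void *parent, void *child)
    {   /* stand-in for the metadata cache call */
        (void)parent; (void)child;
        return 0;
    }

    static int hdr_notify_sketch(notify_action_t action, ea_hdr_t *hdr)
    {
        /* On eviction notice, tear down the dependency recorded at setup,
         * so it is created and destroyed exactly once. */
        if (ACTION_BEFORE_EVICT == action && hdr->fd_parent != NULL) {
            if (destroy_flush_dependency(hdr->fd_parent, hdr) < 0)
                return -1;
            hdr->fd_parent = NULL;
        }
        return 0;
    }
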
*/ @@ -448,7 +448,7 @@ H5_DLL herr_t H5EA__dblock_dest(H5EA_dblock_t *dblock); H5_DLL H5EA_dblk_page_t *H5EA__dblk_page_alloc(H5EA_hdr_t *hdr, H5EA_sblock_t *parent); H5_DLL herr_t H5EA__dblk_page_create(H5EA_hdr_t *hdr, H5EA_sblock_t *parent, haddr_t addr); -H5_DLL H5EA_dblk_page_t *H5EA__dblk_page_protect(H5EA_hdr_t *hdr, +H5_DLL H5EA_dblk_page_t *H5EA__dblk_page_protect(H5EA_hdr_t *hdr, H5EA_sblock_t *parent, haddr_t dblk_page_addr, unsigned flags); H5_DLL herr_t H5EA__dblk_page_unprotect(H5EA_dblk_page_t *dblk_page, unsigned cache_flags); diff --git a/src/H5EAtest.c b/src/H5EAtest.c index 58e255a..8926d6a 100644 --- a/src/H5EAtest.c +++ b/src/H5EAtest.c @@ -346,10 +346,10 @@ END_FUNC(STATIC) /* end H5EA__test_debug() */ * Function: H5EA__test_crt_dbg_context * * Purpose: Create context for debugging callback - * + * * Return: Success: non-NULL * Failure: NULL - * + * * Programmer: Vailin Choi; August 2010 * *------------------------------------------------------------------------- @@ -357,10 +357,10 @@ END_FUNC(STATIC) /* end H5EA__test_debug() */ BEGIN_FUNC(STATIC, ERR, void *, NULL, NULL, H5EA__test_crt_dbg_context(H5F_t H5_ATTR_UNUSED *f, haddr_t H5_ATTR_UNUSED obj_addr)) - + /* Local variables */ H5EA__ctx_cb_t *ctx; /* Context for callbacks */ - + /* Allocate new context structure */ if(NULL == (ctx = H5FL_MALLOC(H5EA__ctx_cb_t))) H5E_THROW(H5E_CANTALLOC, "can't allocate extensible array client callback context") diff --git a/src/H5Eint.c b/src/H5Eint.c index 735f0f2..ca7031b 100644 --- a/src/H5Eint.c +++ b/src/H5Eint.c @@ -994,7 +994,7 @@ H5E_dump_api_stack(hbool_t is_api) #ifdef H5_NO_DEPRECATED_SYMBOLS if(estack->auto_op.func2) (void)((estack->auto_op.func2)(H5E_DEFAULT, estack->auto_data)); -#else /* H5_NO_DEPRECATED_SYMBOLS */ +#else /* H5_NO_DEPRECATED_SYMBOLS */ if(estack->auto_op.vers == 1) { if(estack->auto_op.func1) (void)((estack->auto_op.func1)(estack->auto_data)); diff --git a/src/H5Epkg.h b/src/H5Epkg.h index 86b5b73..83f3816 100644 --- a/src/H5Epkg.h +++ b/src/H5Epkg.h @@ -79,7 +79,7 @@ typedef struct { typedef struct { H5E_auto2_t func2; /* Only the new style callback function is available. */ } H5E_auto_op_t; -#endif /* H5_NO_DEPRECATED_SYMBOLS */ +#endif /* H5_NO_DEPRECATED_SYMBOLS */ /* Some syntactic sugar to make the compiler happy with two different kinds of callbacks */ typedef struct { -- cgit v0.12