path: root/src
author    Quincey Koziol <koziol@hdfgroup.org>  2006-06-27 14:45:06 (GMT)
committer Quincey Koziol <koziol@hdfgroup.org>  2006-06-27 14:45:06 (GMT)
commit    7be3afb278aea67ba09a97f4b41c0aaaf5c47983 (patch)
tree      24ed86ab2a5c982fbf182d2ac8cd892c3813bc34 /src
parent    8d72542a50fac7a747fe0bfec8d2285de8efd29f (diff)
[svn-r12440] Purpose:
Code cleanup

Description:
Trim trailing whitespace in Makefile.am and C/C++ source files to make diffing changes easier.

Platforms tested:
None necessary, whitespace only change
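The cleanup itself is mechanical: every hunk in the diff below only removes spaces and tabs at the ends of lines, which is why the 1022 insertions exactly match the 1022 deletions. As a rough illustration only (this helper is not part of the commit; the name trim_ws.c and the usage shown are hypothetical), a minimal C filter performing the same kind of cleanup could look like this:

/* trim_ws.c -- illustrative sketch, not part of HDF5 or this commit.
 * Reads a source file on stdin and writes it to stdout with trailing
 * spaces and tabs removed from every line.
 *
 * Build:  cc -o trim_ws trim_ws.c
 * Usage:  ./trim_ws < H5AC.c > H5AC.c.clean
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
    char line[4096];            /* assumes each input line fits in this buffer */

    while (fgets(line, sizeof(line), stdin) != NULL) {
        size_t len = strlen(line);
        int had_newline = (len > 0 && line[len - 1] == '\n');

        if (had_newline)
            len--;              /* trim from just before the newline */

        while (len > 0 && (line[len - 1] == ' ' || line[len - 1] == '\t'))
            len--;              /* drop trailing blanks and tabs */

        line[len] = '\0';
        fputs(line, stdout);
        if (had_newline)
            fputc('\n', stdout);
    }

    return 0;
}

Because each trimmed line shows up as one removed line and one re-added line, a whitespace-only pass like this produces the paired -/+ lines seen throughout the diff.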
Diffstat (limited to 'src')
-rw-r--r--  src/H5.c            6
-rw-r--r--  src/H5A.c           4
-rw-r--r--  src/H5AC.c          316
-rw-r--r--  src/H5ACpkg.h       110
-rw-r--r--  src/H5ACprivate.h   4
-rw-r--r--  src/H5ACpublic.h    30
-rw-r--r--  src/H5B2private.h   2
-rw-r--r--  src/H5Bprivate.h    2
-rw-r--r--  src/H5C.c           306
-rw-r--r--  src/H5CS.c          8
-rw-r--r--  src/H5Cpkg.h        48
-rw-r--r--  src/H5Cprivate.h    38
-rw-r--r--  src/H5D.c           2
-rw-r--r--  src/H5Dcompact.c    6
-rw-r--r--  src/H5Dcontig.c     4
-rw-r--r--  src/H5Dio.c         20
-rw-r--r--  src/H5Distore.c     26
-rw-r--r--  src/H5Dmpio.c       276
-rw-r--r--  src/H5Doh.c         2
-rw-r--r--  src/H5E.c           26
-rw-r--r--  src/H5Eterm.h       266
-rw-r--r--  src/H5F.c           6
-rw-r--r--  src/H5FDfamily.c    2
-rw-r--r--  src/H5FDmpi.h       4
-rw-r--r--  src/H5FDmpio.c      36
-rw-r--r--  src/H5FDmpiposix.c  10
-rw-r--r--  src/H5FDmulti.c     2
-rw-r--r--  src/H5FS.c          2
-rw-r--r--  src/H5FSprivate.h   2
-rw-r--r--  src/H5G.c           40
-rw-r--r--  src/H5Gloc.c        2
-rw-r--r--  src/H5Gname.c       2
-rw-r--r--  src/H5Gnode.c       6
-rw-r--r--  src/H5Gobj.c        4
-rw-r--r--  src/H5Gpublic.h     2
-rw-r--r--  src/H5HFcache.c     4
-rw-r--r--  src/H5HFhdr.c       2
-rw-r--r--  src/H5HFiter.c      2
-rw-r--r--  src/H5HFprivate.h   2
-rw-r--r--  src/H5HL.c          20
-rw-r--r--  src/H5O.c           122
-rw-r--r--  src/H5Oattr.c       32
-rw-r--r--  src/H5Ocont.c       8
-rw-r--r--  src/H5Odtype.c      4
-rw-r--r--  src/H5Oefl.c        8
-rw-r--r--  src/H5Olayout.c     10
-rw-r--r--  src/H5Olinfo.c      2
-rw-r--r--  src/H5Olink.c       16
-rw-r--r--  src/H5Opkg.h        6
-rw-r--r--  src/H5Opline.c      4
-rw-r--r--  src/H5Oprivate.h    6
-rw-r--r--  src/H5Oshared.c     12
-rw-r--r--  src/H5Ostab.c       18
-rwxr-xr-x  src/H5Pocpl.c       14
-rw-r--r--  src/H5Shyper.c      12
-rw-r--r--  src/H5Stest.c       6
-rw-r--r--  src/H5T.c           34
-rw-r--r--  src/H5Tconv.c       42
-rw-r--r--  src/H5Tpkg.h        24
-rw-r--r--  src/H5Vprivate.h    10
-rw-r--r--  src/H5private.h     2
61 files changed, 1022 insertions, 1022 deletions
diff --git a/src/H5.c b/src/H5.c
index 0bbe225..7ca10d5 100644
--- a/src/H5.c
+++ b/src/H5.c
@@ -3127,11 +3127,11 @@ H5_trace (const double *returning, const char *func, const char *type, ...)
* Function: HDrand/HDsrand
*
* Purpose: Wrapper function for rand. If rand_r exists on this system,
- * use it.
+ * use it.
*
* Wrapper function for srand. If rand_r is available, it will keep
* track of the seed locally instead of using srand() which modifies
- * global state and can break other programs.
+ * global state and can break other programs.
*
* Return: Success: Random number from 0 to RAND_MAX
*
@@ -3154,7 +3154,7 @@ int HDrand(void)
void HDsrand(unsigned int seed)
{
- g_seed = seed;
+ g_seed = seed;
}
#endif
diff --git a/src/H5A.c b/src/H5A.c
index abd70b5..43759b8 100644
--- a/src/H5A.c
+++ b/src/H5A.c
@@ -1071,7 +1071,7 @@ H5Aget_create_plist(hid_t attr_id)
/* Set the character encoding on the new property list */
if(H5P_set(new_plist, H5A_CHAR_ENCODING_NAME, &(attr->encoding)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set character encoding")
-
+
ret_value = new_plist_id;
done:
@@ -1700,7 +1700,7 @@ H5A_close(H5A_t *attr)
/* Free temporary buffer */
tmp_buf = H5FL_BLK_FREE(attr_buf, tmp_buf);
} /* end if */
-
+
/* Free dynamicly allocated items */
if(H5A_free(attr) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTRELEASE, FAIL, "can't release attribute info")
diff --git a/src/H5AC.c b/src/H5AC.c
index 798c0b5..cf794c6 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -78,19 +78,19 @@ H5FL_DEFINE_STATIC(H5AC_aux_t);
* structure H5AC_slist_entry_t
*
* The dirty entry list maintained via the d_slist_ptr field of H5AC_aux_t
- * and the cleaned entry list maintained via the c_slist_ptr field of
- * H5AC_aux_t are just lists of the file offsets of the dirty/cleaned
- * entries. Unfortunately, the slist code makes us define a dynamically
- * allocated structure to store these offsets in. This structure serves
+ * and the cleaned entry list maintained via the c_slist_ptr field of
+ * H5AC_aux_t are just lists of the file offsets of the dirty/cleaned
+ * entries. Unfortunately, the slist code makes us define a dynamically
+ * allocated structure to store these offsets in. This structure serves
* that purpose. Its fields are as follows:
*
- * magic: Unsigned 32 bit integer always set to
- * H5AC__H5AC_SLIST_ENTRY_T_MAGIC. This field is used to
+ * magic: Unsigned 32 bit integer always set to
+ * H5AC__H5AC_SLIST_ENTRY_T_MAGIC. This field is used to
* validate pointers to instances of H5AC_slist_entry_t.
- *
+ *
* addr: file offset of a metadata entry. Entries are added to this
* list (if they aren't there already) when they are marked
- * dirty in an unprotect, inserted, or renamed. They are
+ * dirty in an unprotect, inserted, or renamed. They are
* removed when they appear in a clean entries broadcast.
*
****************************************************************************/
@@ -455,9 +455,9 @@ H5AC_term_interface(void)
* through the function.
* JRM - 4/7/05
*
- * Added code allocating and initializing the auxilary
+ * Added code allocating and initializing the auxilary
* structure (an instance of H5AC_aux_t), and linking it
- * to the instance of H5C_t created by H5C_create(). At
+ * to the instance of H5C_t created by H5C_create(). At
* present, the auxilary structure is only used in PHDF5.
*
* JRM - 6/28/05
@@ -465,7 +465,7 @@ H5AC_term_interface(void)
* Added code to set the prefix if required.
*
* JRM - 1/20/06
- *
+ *
* Added code to initialize the new write_done field.
*
* JRM - 5/11/06
@@ -530,17 +530,17 @@ H5AC_create(const H5F_t *f,
if ( (mpi_rank = H5F_mpi_get_rank(f)) < 0 ) {
- HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get mpi rank")
+ HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get mpi rank")
}
if ( (mpi_size = H5F_mpi_get_size(f)) < 0 ) {
- HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get mpi size")
+ HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get mpi size")
}
- /* There is no point in setting up the auxilary structure if size
- * is less than or equal to 1, as there will never be any processes
- * to broadcast the clean lists to.
+ /* There is no point in setting up the auxilary structure if size
+ * is less than or equal to 1, as there will never be any processes
+ * to broadcast the clean lists to.
*/
if ( mpi_size > 1 ) {
@@ -550,13 +550,13 @@ H5AC_create(const H5F_t *f,
"Can't allocate H5AC auxilary structure.")
} else {
-
+
aux_ptr->magic = H5AC__H5AC_AUX_T_MAGIC;
aux_ptr->mpi_comm = mpi_comm;
aux_ptr->mpi_rank = mpi_rank;
aux_ptr->mpi_size = mpi_size;
aux_ptr->write_permitted = FALSE;
- aux_ptr->dirty_bytes_threshold =
+ aux_ptr->dirty_bytes_threshold =
H5AC__DEFAULT_DIRTY_BYTES_THRESHOLD;
aux_ptr->dirty_bytes = 0;
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
@@ -578,28 +578,28 @@ H5AC_create(const H5F_t *f,
}
if ( mpi_rank == 0 ) {
-
- aux_ptr->d_slist_ptr =
+
+ aux_ptr->d_slist_ptr =
H5SL_create(H5SL_TYPE_HADDR,0.5,(size_t)16);
if ( aux_ptr->d_slist_ptr == NULL ) {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL,
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL,
"can't create dirtied entry list.")
}
-
- aux_ptr->c_slist_ptr =
+
+ aux_ptr->c_slist_ptr =
H5SL_create(H5SL_TYPE_HADDR,0.5,(size_t)16);
if ( aux_ptr->c_slist_ptr == NULL ) {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL,
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL,
"can't create cleaned entry list.")
}
}
}
- if ( aux_ptr != NULL ) {
+ if ( aux_ptr != NULL ) {
if ( aux_ptr->mpi_rank == 0 ) {
@@ -661,7 +661,7 @@ H5AC_create(const H5F_t *f,
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
- }
+ }
#ifdef H5_HAVE_PARALLEL
else if ( aux_ptr != NULL ) {
@@ -739,10 +739,10 @@ done:
*
* JRM - 6/7/04
*
- * Added code to free the auxiliary structure and its
+ * Added code to free the auxiliary structure and its
* associated slist if present.
* JRM - 6/28/05
- *
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -857,8 +857,8 @@ done:
*
* Complete re-write. See above for details. -- JRM 5/11/04
*
- * Abstracted the guts of the function to H5C_flush_cache()
- * in H5C.c, and then re-wrote the function as a wrapper for
+ * Abstracted the guts of the function to H5C_flush_cache()
+ * in H5C.c, and then re-wrote the function as a wrapper for
* H5C_flush_cache().
*
* JRM - 6/7/04
@@ -895,9 +895,9 @@ H5AC_flush(H5F_t *f, hid_t dxpl_id, unsigned flags)
if ( aux_ptr != NULL ) {
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d::H5AC_flush: (u/uu/i/iu/r/ru) = %d/%d/%d/%d/%d/%d\n",
- (int)(aux_ptr->mpi_rank),
+ (int)(aux_ptr->mpi_rank),
(int)(aux_ptr->unprotect_dirty_bytes),
(int)(aux_ptr->unprotect_dirty_bytes_updates),
(int)(aux_ptr->insert_dirty_bytes),
@@ -907,7 +907,7 @@ H5AC_flush(H5F_t *f, hid_t dxpl_id, unsigned flags)
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
/* to prevent "messages from the future" we must synchronize all
- * processes before we start the flush. Hence the following
+ * processes before we start the flush. Hence the following
* barrier.
*/
if ( MPI_SUCCESS != (mpi_code = MPI_Barrier(aux_ptr->mpi_comm)) ) {
@@ -959,10 +959,10 @@ H5AC_flush(H5F_t *f, hid_t dxpl_id, unsigned flags)
} /* end if ( aux_ptr != NULL ) */
#endif /* H5_HAVE_PARALLEL */
- status = H5C_flush_cache(f,
- dxpl_id,
- H5AC_noblock_dxpl_id,
- f->shared->cache,
+ status = H5C_flush_cache(f,
+ dxpl_id,
+ H5AC_noblock_dxpl_id,
+ f->shared->cache,
flags);
if ( status < 0 ) {
@@ -997,7 +997,7 @@ done:
* 4/27/06
*
* Modifications:
- *
+ *
* None.
*
*-------------------------------------------------------------------------
@@ -1020,7 +1020,7 @@ H5AC_get_entry_status(H5F_t * f,
FUNC_ENTER_NOAPI(H5AC_get_entry_status, FAIL)
- if ( ( cache_ptr == NULL ) ||
+ if ( ( cache_ptr == NULL ) ||
( cache_ptr->magic != H5C__H5C_T_MAGIC ) ||
( ! H5F_addr_defined(addr) ) ||
( status_ptr == NULL ) ) {
@@ -1041,16 +1041,16 @@ H5AC_get_entry_status(H5F_t * f,
status |= H5AC_ES__IN_CACHE;
- if ( is_dirty )
+ if ( is_dirty )
status |= H5AC_ES__IS_DIRTY;
- if ( is_protected )
+ if ( is_protected )
status |= H5AC_ES__IS_PROTECTED;
- if ( is_pinned )
+ if ( is_pinned )
status |= H5AC_ES__IS_PINNED;
}
-
+
*status_ptr = status;
done:
@@ -1210,7 +1210,7 @@ done:
* Purpose: Mark a pinned entry as dirty. The target entry MUST be
* be pinned, and MUST be unprotected.
*
- * If the entry has changed size, the function updates
+ * If the entry has changed size, the function updates
* data structures for the size change.
*
* Return: Non-negative on success/Negative on failure
@@ -1277,8 +1277,8 @@ H5AC_mark_pinned_entry_dirty(H5F_t * f,
}
#endif /* H5_HAVE_PARALLEL */
- result = H5C_mark_pinned_entry_dirty(cache_ptr,
- thing,
+ result = H5C_mark_pinned_entry_dirty(cache_ptr,
+ thing,
size_changed,
new_size);
if ( result < 0 ) {
@@ -1298,7 +1298,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5AC_mark_pinned_or_protected_entry_dirty
*
- * Purpose: Mark a pinned or protected entry as dirty. The target
+ * Purpose: Mark a pinned or protected entry as dirty. The target
* entry MUST be either pinned, protected, or both.
*
* Unlike H5AC_mark_pinned_entry_dirty(), this function does
@@ -1406,7 +1406,7 @@ done:
* the PHDF5 case. It should have no effect on either the
* serial or FPHSD5 cases.
*
- * Note that this code presumes that the renamed entry will
+ * Note that this code presumes that the renamed entry will
* be present in all caches -- which it must be at present.
* To maintain this invarient, only rename entries immediately
* after you unprotect them.
@@ -1461,7 +1461,7 @@ H5AC_rename(H5F_t *f, const H5AC_class_t *type, haddr_t old_addr, haddr_t new_ad
if ( ( aux_ptr != NULL ) &&
( aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold ) ) {
- result = H5AC_propagate_flushed_and_still_clean_entries_list(f,
+ result = H5AC_propagate_flushed_and_still_clean_entries_list(f,
H5AC_noblock_dxpl_id,
f->shared->cache,
TRUE);
@@ -1525,7 +1525,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5AC_protect
*
- * Purpose: If the target entry is not in the cache, load it. If
+ * Purpose: If the target entry is not in the cache, load it. If
* necessary, attempt to evict one or more entries to keep
* the cache within its maximum size.
*
@@ -1533,8 +1533,8 @@ done:
* to the caller. The caller must call H5AC_unprotect() when
* finished with the entry.
*
- * While it is protected, the entry may not be either evicted
- * or flushed -- nor may it be accessed by another call to
+ * While it is protected, the entry may not be either evicted
+ * or flushed -- nor may it be accessed by another call to
* H5AC_protect. Any attempt to do so will result in a failure.
*
* This comment is a re-write of the original Purpose: section.
@@ -1575,8 +1575,8 @@ done:
* Purpose section above.
*
* JRM - 6/7/04
- * Abstracted the guts of the function to H5C_protect()
- * in H5C.c, and then re-wrote the function as a wrapper for
+ * Abstracted the guts of the function to H5C_protect()
+ * in H5C.c, and then re-wrote the function as a wrapper for
* H5C_protect().
*
*-------------------------------------------------------------------------
@@ -1630,7 +1630,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5AC_unpin_entry()
*
- * Purpose: Unpin a cache entry. The entry must be unprotected at
+ * Purpose: Unpin a cache entry. The entry must be unprotected at
* the time of call, and must be pinned.
*
* Return: Non-negative on success/Negative on failure
@@ -1741,18 +1741,18 @@ done:
*
* JRM - 7/5/05
* Added code to track dirty byte generation, and to trigger
- * clean entry list propagation when it exceeds a user
+ * clean entry list propagation when it exceeds a user
* specified threshold. Note that this code only applies in
* the PHDF5 case. It should have no effect on either the
* serial or FPHSD5 cases.
*
* JRM - 9/8/05
* Added code to track entry size changes. This is necessary
- * as it can effect dirty byte creation counts, thereby
+ * as it can effect dirty byte creation counts, thereby
* throwing the caches out of sync in the PHDF5 case.
*
* JRM - 5/16/06
- * Added code to use the new dirtied field in
+ * Added code to use the new dirtied field in
* H5C_cache_entry_t in the test to see if the entry has
* been dirtied.
*
@@ -1786,7 +1786,7 @@ H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
dirtied = ( ( (flags & H5AC__DIRTIED_FLAG) == H5AC__DIRTIED_FLAG ) ||
( ((H5AC_info_t *)thing)->dirtied ) );
- if ( dirtied ) {
+ if ( dirtied ) {
if ( (type->size)(f, thing, &new_size) < 0 ) {
@@ -1805,7 +1805,7 @@ H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
if ( ( dirtied ) && ( ((H5AC_info_t *)thing)->is_dirty == FALSE ) &&
( NULL != (aux_ptr = f->shared->cache->aux_ptr) ) ) {
- result = H5AC_log_dirtied_entry(f->shared->cache,
+ result = H5AC_log_dirtied_entry(f->shared->cache,
(H5AC_info_t *)thing,
addr,
size_changed,
@@ -1980,7 +1980,7 @@ done:
* Reworked for the addition of struct H5AC_cache_config_t.
*
* JRM - 10/25/05
- * Added support for the new dirty_bytes_threshold field of
+ * Added support for the new dirty_bytes_threshold field of
* both H5AC_cache_config_t and H5AC_aux_t.
*
*-------------------------------------------------------------------------
@@ -1996,21 +1996,21 @@ H5AC_get_cache_auto_resize_config(H5AC_t * cache_ptr,
FUNC_ENTER_NOAPI(H5AC_get_cache_auto_resize_config, FAIL)
- if ( ( cache_ptr == NULL )
+ if ( ( cache_ptr == NULL )
||
#ifdef H5_HAVE_PARALLEL
- ( ( cache_ptr->aux_ptr != NULL )
- &&
- ( ((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic
- !=
- H5AC__H5AC_AUX_T_MAGIC
+ ( ( cache_ptr->aux_ptr != NULL )
+ &&
+ ( ((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic
+ !=
+ H5AC__H5AC_AUX_T_MAGIC
)
- )
+ )
||
#endif /* H5_HAVE_PARALLEL */
- ( config_ptr == NULL )
+ ( config_ptr == NULL )
||
- ( config_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION )
+ ( config_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION )
)
{
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
@@ -2060,7 +2060,7 @@ H5AC_get_cache_auto_resize_config(H5AC_t * cache_ptr,
#ifdef H5_HAVE_PARALLEL
if ( cache_ptr->aux_ptr != NULL ) {
- config_ptr->dirty_bytes_threshold =
+ config_ptr->dirty_bytes_threshold =
((H5AC_aux_t *)(cache_ptr->aux_ptr))->dirty_bytes_threshold;
} else {
@@ -2225,7 +2225,7 @@ done:
* Updated for the addition of H5AC_cache_config_t.
*
* John Mainzer -- 1025/05
- * Added support for the new dirty_bytes_threshold field of
+ * Added support for the new dirty_bytes_threshold field of
* both H5AC_cache_config_t and H5AC_aux_t.
*
*-------------------------------------------------------------------------
@@ -2244,14 +2244,14 @@ H5AC_set_cache_auto_resize_config(H5AC_t * cache_ptr,
if ( ( cache_ptr == NULL )
#ifdef H5_HAVE_PARALLEL
||
- ( ( cache_ptr->aux_ptr != NULL )
- &&
- (
- ((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic
- !=
- H5AC__H5AC_AUX_T_MAGIC
+ ( ( cache_ptr->aux_ptr != NULL )
+ &&
+ (
+ ((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic
+ !=
+ H5AC__H5AC_AUX_T_MAGIC
)
- )
+ )
#endif /* H5_HAVE_PARALLEL */
) {
@@ -2275,16 +2275,16 @@ H5AC_set_cache_auto_resize_config(H5AC_t * cache_ptr,
"config_ptr->rpt_fcn_enabled must be either TRUE or FALSE.")
}
- if (
- (
- config_ptr->dirty_bytes_threshold
- <
+ if (
+ (
+ config_ptr->dirty_bytes_threshold
+ <
H5AC__MIN_DIRTY_BYTES_THRESHOLD
)
||
- (
- config_ptr->dirty_bytes_threshold
- >
+ (
+ config_ptr->dirty_bytes_threshold
+ >
H5AC__MAX_DIRTY_BYTES_THRESHOLD
)
) {
@@ -2311,7 +2311,7 @@ H5AC_set_cache_auto_resize_config(H5AC_t * cache_ptr,
#ifdef H5_HAVE_PARALLEL
if ( cache_ptr->aux_ptr != NULL ) {
- ((H5AC_aux_t *)(cache_ptr->aux_ptr))->dirty_bytes_threshold =
+ ((H5AC_aux_t *)(cache_ptr->aux_ptr))->dirty_bytes_threshold =
config_ptr->dirty_bytes_threshold;
}
#endif /* H5_HAVE_PARALLEL */
@@ -2381,7 +2381,7 @@ H5AC_validate_config(H5AC_cache_config_t * config_ptr)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
"dirty_bytes_threshold too small.")
- } else
+ } else
if ( config_ptr->dirty_bytes_threshold > H5AC__MAX_DIRTY_BYTES_THRESHOLD ) {
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
@@ -2423,7 +2423,7 @@ done:
* list, and also remove any matching entries from the dirtied
* slist.
*
- * This function must only be called by the process with
+ * This function must only be called by the process with
* MPI_rank 0.
*
* Return SUCCEED on success, and FAIL on failure.
@@ -2463,7 +2463,7 @@ H5AC_broadcast_clean_list(H5AC_t * cache_ptr)
HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
HDassert( aux_ptr->mpi_rank == 0 );
HDassert( aux_ptr->c_slist_ptr != NULL );
- HDassert( H5SL_count(aux_ptr->c_slist_ptr) ==
+ HDassert( H5SL_count(aux_ptr->c_slist_ptr) ==
(size_t)(aux_ptr->c_slist_len) );
@@ -2479,7 +2479,7 @@ H5AC_broadcast_clean_list(H5AC_t * cache_ptr)
HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 1", mpi_result)
- }
+ }
if ( num_entries > 0 )
{
@@ -2518,7 +2518,7 @@ H5AC_broadcast_clean_list(H5AC_t * cache_ptr)
i++;
/* now remove the entry from the cleaned entry list */
- if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
+ if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
!= slist_entry_ptr ) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
@@ -2533,17 +2533,17 @@ H5AC_broadcast_clean_list(H5AC_t * cache_ptr)
HDassert( aux_ptr->c_slist_len >= 0 );
- /* and also remove the matching entry from the dirtied list
+ /* and also remove the matching entry from the dirtied list
* if it exists.
*/
- if ( (slist_entry_ptr = H5SL_search(aux_ptr->d_slist_ptr,
+ if ( (slist_entry_ptr = H5SL_search(aux_ptr->d_slist_ptr,
(void *)(&addr))) != NULL ) {
- HDassert( slist_entry_ptr->magic ==
+ HDassert( slist_entry_ptr->magic ==
H5AC__H5AC_SLIST_ENTRY_T_MAGIC );
HDassert( slist_entry_ptr->addr == addr );
- if ( H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr))
+ if ( H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr))
!= slist_entry_ptr ) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
@@ -2562,15 +2562,15 @@ H5AC_broadcast_clean_list(H5AC_t * cache_ptr)
} /* while */
- /* Now broadcast the list of cleaned entries -- if there is one.
+ /* Now broadcast the list of cleaned entries -- if there is one.
*
* The peculiar structure of the following call to MPI_Bcast is
* due to MPI's (?) failure to believe in the MPI_Offset type.
- * Thus the element type is MPI_BYTE, with size equal to the
+ * Thus the element type is MPI_BYTE, with size equal to the
* buf_size computed above.
*/
- mpi_result = MPI_Bcast((void *)buf_ptr, (int)buf_size, MPI_BYTE, 0,
+ mpi_result = MPI_Bcast((void *)buf_ptr, (int)buf_size, MPI_BYTE, 0,
aux_ptr->mpi_comm);
if ( mpi_result != MPI_SUCCESS ) {
@@ -2612,11 +2612,11 @@ done:
* Modifications:
*
* John Mainzer, 9/23/05
- * Rewrote function to return the value of the
+ * Rewrote function to return the value of the
* write_permitted field in aux structure if the structure
- * exists and mpi_rank is 0.
+ * exists and mpi_rank is 0.
*
- * If the aux structure exists, but mpi_rank isn't 0, the
+ * If the aux structure exists, but mpi_rank isn't 0, the
* function now returns FALSE.
*
* In all other cases, the function returns TRUE.
@@ -2679,7 +2679,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5AC_ext_config_2_int_config()
*
- * Purpose: Utility function to translate an instance of
+ * Purpose: Utility function to translate an instance of
* H5AC_cache_config_t to an instance of H5C_auto_size_ctl_t.
*
* Places translation in *int_conf_ptr and returns SUCCEED
@@ -2808,14 +2808,14 @@ H5AC_log_deleted_entry(H5AC_t * cache_ptr,
HDassert( aux_ptr->c_slist_ptr != NULL );
/* if the entry appears in the dirtied entry slist, remove it. */
- if ( (slist_entry_ptr = H5SL_search(aux_ptr->d_slist_ptr,
+ if ( (slist_entry_ptr = H5SL_search(aux_ptr->d_slist_ptr,
(void *)(&addr))) != NULL ) {
- HDassert( slist_entry_ptr->magic ==
+ HDassert( slist_entry_ptr->magic ==
H5AC__H5AC_SLIST_ENTRY_T_MAGIC );
HDassert( slist_entry_ptr->addr == addr );
- if ( H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr))
+ if ( H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr))
!= slist_entry_ptr ) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
@@ -2832,14 +2832,14 @@ H5AC_log_deleted_entry(H5AC_t * cache_ptr,
}
/* if the entry appears in the cleaned entry slist, remove it. */
- if ( (slist_entry_ptr = H5SL_search(aux_ptr->c_slist_ptr,
+ if ( (slist_entry_ptr = H5SL_search(aux_ptr->c_slist_ptr,
(void *)(&addr))) != NULL ) {
- HDassert( slist_entry_ptr->magic ==
+ HDassert( slist_entry_ptr->magic ==
H5AC__H5AC_SLIST_ENTRY_T_MAGIC );
HDassert( slist_entry_ptr->addr == addr );
- if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
+ if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
!= slist_entry_ptr ) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
@@ -2876,7 +2876,7 @@ done:
* If mpi_rank is 0, we must first check to see if the entry
* appears in the dirty entries slist. If it is, do nothing.
* If it isn't, add the size to th dirty_bytes count, add the
- * entry to the dirty entries slist, and remove it from the
+ * entry to the dirty entries slist, and remove it from the
* cleaned list (if it is present there).
*
* Return SUCCEED on success, and FAIL on failure.
@@ -2965,14 +2965,14 @@ H5AC_log_dirtied_entry(H5AC_t * cache_ptr,
/* the entry is dirty. If it exists on the cleaned entries list,
* remove it.
*/
- if ( (slist_entry_ptr = H5SL_search(aux_ptr->c_slist_ptr,
+ if ( (slist_entry_ptr = H5SL_search(aux_ptr->c_slist_ptr,
(void *)(&addr))) != NULL ) {
- HDassert( slist_entry_ptr->magic ==
+ HDassert( slist_entry_ptr->magic ==
H5AC__H5AC_SLIST_ENTRY_T_MAGIC );
HDassert( slist_entry_ptr->addr == addr );
- if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
+ if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
!= slist_entry_ptr ) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
@@ -3010,12 +3010,12 @@ done:
* Function: H5AC_log_flushed_entry()
*
* Purpose: Update the clean entry slist for the flush of an entry --
- * specifically, if the entry has been cleared, remove it
+ * specifically, if the entry has been cleared, remove it
* from both the cleaned and dirtied lists if it is present.
- * Otherwise, if the entry was dirty, insert the indicated
+ * Otherwise, if the entry was dirty, insert the indicated
* entry address in the clean slist if it isn't there already.
*
- * This function is only used in PHDF5, and should only
+ * This function is only used in PHDF5, and should only
* be called for the process with mpi rank 0.
*
* Return SUCCEED on success, and FAIL on failure.
@@ -3046,8 +3046,8 @@ H5AC_log_flushed_entry_dummy(H5C_t * cache_ptr,
aux_ptr = cache_ptr->aux_ptr;
if ( ( was_dirty ) && ( (flags & H5C__FLUSH_CLEAR_ONLY_FLAG) == 0 ) ) {
-
- HDfprintf(stdout,
+
+ HDfprintf(stdout,
"%d:H5AC_log_flushed_entry(): addr = %d, flags = %x, was_dirty = %d, type_id = %d\n",
(int)(aux_ptr->mpi_rank), (int)addr, flags, (int)was_dirty, type_id);
}
@@ -3091,13 +3091,13 @@ H5AC_log_flushed_entry(H5C_t * cache_ptr,
* cleaned list and the dirtied list.
*/
- if ( (slist_entry_ptr = H5SL_search(aux_ptr->c_slist_ptr,
+ if ( (slist_entry_ptr = H5SL_search(aux_ptr->c_slist_ptr,
(void *)(&addr))) != NULL ) {
HDassert( slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
HDassert( slist_entry_ptr->addr == addr );
- if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
+ if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
!= slist_entry_ptr ) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
@@ -3113,13 +3113,13 @@ H5AC_log_flushed_entry(H5C_t * cache_ptr,
HDassert( aux_ptr->c_slist_len >= 0 );
}
- if ( (slist_entry_ptr = H5SL_search(aux_ptr->d_slist_ptr,
+ if ( (slist_entry_ptr = H5SL_search(aux_ptr->d_slist_ptr,
(void *)(&addr))) != NULL ) {
HDassert( slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
HDassert( slist_entry_ptr->addr == addr );
- if ( H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr))
+ if ( H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr))
!= slist_entry_ptr ) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
@@ -3177,7 +3177,7 @@ done:
* If mpi_rank isnt 0, this simply means adding the size
* of the entry to the dirty_bytes count.
*
- * If mpi_rank is 0, we must also add the entry to the
+ * If mpi_rank is 0, we must also add the entry to the
* dirty entries slist.
*
* Return SUCCEED on success, and FAIL on failure.
@@ -3246,7 +3246,7 @@ H5AC_log_inserted_entry(H5F_t * f,
slist_entry_ptr->magic = H5AC__H5AC_SLIST_ENTRY_T_MAGIC;
slist_entry_ptr->addr = addr;
- if ( H5SL_insert(aux_ptr->d_slist_ptr, slist_entry_ptr,
+ if ( H5SL_insert(aux_ptr->d_slist_ptr, slist_entry_ptr,
&(slist_entry_ptr->addr)) < 0 ) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, \
@@ -3266,7 +3266,7 @@ H5AC_log_inserted_entry(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"Inserted entry in clean slist.")
}
- }
+ }
aux_ptr->dirty_bytes += size;
@@ -3292,14 +3292,14 @@ done:
* WARNING
*
* At present, the way that the rename call is used ensures
- * that the renamed entry is present in all caches by
+ * that the renamed entry is present in all caches by
* renaming in a collective operation and immediately after
* unprotecting the target entry.
*
* This function uses this invarient, and will cause arcane
* failures if it is not met. If maintaining this invarient
* becomes impossible, we will have to rework this function
- * extensively, and likely include a bit of IPC for
+ * extensively, and likely include a bit of IPC for
* synchronization. A better option might be to subsume
* rename in the unprotect operation.
*
@@ -3308,17 +3308,17 @@ done:
*
* For processes with mpi rank other 0, it simply checks to
* see if the entry was dirty prior to the rename, and adds
- * the entries size to the dirty bytes count.
+ * the entries size to the dirty bytes count.
*
* In the process with mpi rank 0, the function first checks
* to see if the entry was dirty prior to the rename. If it
* was, and if the entry doesn't appear in the dirtied list
- * under its old address, it adds the entry's size to the
+ * under its old address, it adds the entry's size to the
* dirty bytes count.
*
- * The rank 0 process then removes any references to the
- * entry under its old address from the cleands and dirtied
- * lists, and inserts an entry in the dirtied list under the
+ * The rank 0 process then removes any references to the
+ * entry under its old address from the cleands and dirtied
+ * lists, and inserts an entry in the dirtied list under the
* new address.
*
* Return SUCCEED on success, and FAIL on failure.
@@ -3373,7 +3373,7 @@ H5AC_log_renamed_entry(H5AC_t * cache_ptr,
HDassert( aux_ptr->c_slist_ptr != NULL );
/* if the entry appears in the cleaned entry slist, under its old
- * address, remove it.
+ * address, remove it.
*/
if ( (slist_entry_ptr = H5SL_search(aux_ptr->c_slist_ptr,
(void *)(&old_addr))) != NULL ) {
@@ -3422,9 +3422,9 @@ H5AC_log_renamed_entry(H5AC_t * cache_ptr,
HDassert( aux_ptr->d_slist_len >= 0 );
} else {
-
- /* otherwise, allocate a new entry that is ready
- * for insertion, and increment dirty_bytes.
+
+ /* otherwise, allocate a new entry that is ready
+ * for insertion, and increment dirty_bytes.
*
* Note that the fact that the entry wasn't in the dirtied
* list under its old address implies that it must have
@@ -3489,12 +3489,12 @@ done:
* Function: H5AC_propagate_flushed_and_still_clean_entries_list
*
* Purpose: In PHDF5, only the metadata cache with mpi rank 0 is allowed
- * to write to file. All other metadata caches on processes
+ * to write to file. All other metadata caches on processes
* with rank greater than 0 must retain dirty entries until
* they are notified that the entry is now clean.
*
* This function is the main routine for that proceedure.
- * It must be called simultaniously on all processes that
+ * It must be called simultaniously on all processes that
* have the relevant file open. To this end, there must
* be a barrier immediately prior to this call.
*
@@ -3502,21 +3502,21 @@ done:
*
* 1) Dirty byte creation exceeds some user specified value.
*
- * While metadata reads may occur independently, all
+ * While metadata reads may occur independently, all
* operations writing metadata must be collective. Thus
* all metadata caches see the same sequence of operations,
* and therefore the same dirty data creation.
*
* This fact is used to synchronize the caches for purposes
* of propagating the list of flushed and still clean
- * entries, by simply calling this function from all
+ * entries, by simply calling this function from all
* caches whenever some user specified threshold on dirty
* data is exceeded.
*
- * 2) Under direct user control -- this operation must be
+ * 2) Under direct user control -- this operation must be
* collective.
*
- * The operations to be managed by this function are as
+ * The operations to be managed by this function are as
* follows:
*
* For the process with mpi rank 0:
@@ -3525,10 +3525,10 @@ done:
* and then disable writes again.
*
* 2) Load the contents of the flushed and still clean entries
- * list (c_slist_ptr) into a buffer, and broadcast that
+ * list (c_slist_ptr) into a buffer, and broadcast that
* buffer to all the other caches.
*
- * 3) Clear the flushed and still clean entries list
+ * 3) Clear the flushed and still clean entries list
* (c_slist_ptr).
*
*
@@ -3542,7 +3542,7 @@ done:
* For all processes:
*
* 1) Reset the dirtied bytes count to 0.
- *
+ *
* Return: Success: non-negative
*
* Failure: negative
@@ -3560,8 +3560,8 @@ done:
#ifdef H5_HAVE_PARALLEL
herr_t
-H5AC_propagate_flushed_and_still_clean_entries_list(H5F_t * f,
- hid_t dxpl_id,
+H5AC_propagate_flushed_and_still_clean_entries_list(H5F_t * f,
+ hid_t dxpl_id,
H5AC_t * cache_ptr,
hbool_t do_barrier)
{
@@ -3581,10 +3581,10 @@ H5AC_propagate_flushed_and_still_clean_entries_list(H5F_t * f,
HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:H5AC_propagate...:%d: (u/uu/i/iu/r/ru) = %d/%d/%d/%d/%d/%d\n",
- (int)(aux_ptr->mpi_rank),
- (int)(aux_ptr->dirty_bytes_propagations),
+ (int)(aux_ptr->mpi_rank),
+ (int)(aux_ptr->dirty_bytes_propagations),
(int)(aux_ptr->unprotect_dirty_bytes),
(int)(aux_ptr->unprotect_dirty_bytes_updates),
(int)(aux_ptr->insert_dirty_bytes),
@@ -3610,7 +3610,7 @@ H5AC_propagate_flushed_and_still_clean_entries_list(H5F_t * f,
aux_ptr->write_permitted = TRUE;
- result = H5C_flush_to_min_clean(f, dxpl_id, H5AC_noblock_dxpl_id,
+ result = H5C_flush_to_min_clean(f, dxpl_id, H5AC_noblock_dxpl_id,
cache_ptr);
aux_ptr->write_permitted = FALSE;
@@ -3636,7 +3636,7 @@ H5AC_propagate_flushed_and_still_clean_entries_list(H5F_t * f,
} else {
- if ( H5AC_receive_and_apply_clean_list(f, dxpl_id,
+ if ( H5AC_receive_and_apply_clean_list(f, dxpl_id,
H5AC_noblock_dxpl_id,
cache_ptr) < 0 ) {
@@ -3671,7 +3671,7 @@ done:
* Purpose: Receive the list of cleaned entries from process 0,
* and mark the specified entries as clean.
*
- * This function must only be called by the process with
+ * This function must only be called by the process with
* MPI_rank greater than 0.
*
* Return SUCCEED on success, and FAIL on failure.
@@ -3737,7 +3737,7 @@ H5AC_receive_and_apply_clean_list(H5F_t * f,
"memory allocation failed for receive buffer")
}
- haddr_buf_ptr = (haddr_t *)H5MM_malloc(sizeof(haddr_t) *
+ haddr_buf_ptr = (haddr_t *)H5MM_malloc(sizeof(haddr_t) *
(size_t)num_entries);
if ( haddr_buf_ptr == NULL ) {
@@ -3747,15 +3747,15 @@ H5AC_receive_and_apply_clean_list(H5F_t * f,
}
- /* Now receive the list of cleaned entries
+ /* Now receive the list of cleaned entries
*
* The peculiar structure of the following call to MPI_Bcast is
* due to MPI's (?) failure to believe in the MPI_Offset type.
- * Thus the element type is MPI_BYTE, with size equal to the
+ * Thus the element type is MPI_BYTE, with size equal to the
* buf_size computed above.
*/
- mpi_result = MPI_Bcast((void *)MPI_Offset_buf_ptr, (int)buf_size,
+ mpi_result = MPI_Bcast((void *)MPI_Offset_buf_ptr, (int)buf_size,
MPI_BYTE, 0, aux_ptr->mpi_comm);
if ( mpi_result != MPI_SUCCESS ) {
@@ -3795,7 +3795,7 @@ done:
if ( MPI_Offset_buf_ptr != NULL ) {
- MPI_Offset_buf_ptr =
+ MPI_Offset_buf_ptr =
(MPI_Offset *)H5MM_xfree((void *)MPI_Offset_buf_ptr);
}
diff --git a/src/H5ACpkg.h b/src/H5ACpkg.h
index e31f245..fc51ef0 100644
--- a/src/H5ACpkg.h
+++ b/src/H5ACpkg.h
@@ -21,7 +21,7 @@
* Source files outside the H5AC package should include
* H5ACprivate.h instead.
*
- * The one exception to this rule is testpar/t_cache.c. The
+ * The one exception to this rule is testpar/t_cache.c. The
* test code is easier to write if it can look at H5AC_aux_t.
* Indeed, this is the main reason why this file was created.
*/
@@ -51,9 +51,9 @@
#define H5AC_DEBUG_DIRTY_BYTES_CREATION 0
/*-------------------------------------------------------------------------
- * It is a bit difficult to set ranges of allowable values on the
- * dirty_bytes_threshold field of H5AC_aux_t. The following are
- * probably broader than they should be.
+ * It is a bit difficult to set ranges of allowable values on the
+ * dirty_bytes_threshold field of H5AC_aux_t. The following are
+ * probably broader than they should be.
*-------------------------------------------------------------------------
*/
@@ -71,71 +71,71 @@
* are some features of the metadata cache that are specific to it, and which
* therefore do not belong in the more generic H5C cache code.
*
- * In particular, there is the matter of synchronizing writes from the
+ * In particular, there is the matter of synchronizing writes from the
* metadata cache to disk in the PHDF5 case.
*
- * Prior to this update, the presumption was that all metadata caches would
- * write the same data at the same time since all operations modifying
- * metadata must be performed collectively. Given this assumption, it was
- * safe to allow only the writes from process 0 to actually make it to disk,
+ * Prior to this update, the presumption was that all metadata caches would
+ * write the same data at the same time since all operations modifying
+ * metadata must be performed collectively. Given this assumption, it was
+ * safe to allow only the writes from process 0 to actually make it to disk,
* while metadata writes from all other processes were discarded.
*
- * Unfortunately, this presumption is in error as operations that read
- * metadata need not be collective, but can change the location of dirty
- * entries in the metadata cache LRU lists. This can result in the same
- * metadata write operation triggering writes from the metadata caches on
- * some processes, but not all (causing a hang), or in different sets of
- * entries being written from different caches (potentially resulting in
+ * Unfortunately, this presumption is in error as operations that read
+ * metadata need not be collective, but can change the location of dirty
+ * entries in the metadata cache LRU lists. This can result in the same
+ * metadata write operation triggering writes from the metadata caches on
+ * some processes, but not all (causing a hang), or in different sets of
+ * entries being written from different caches (potentially resulting in
* metadata corruption in the file).
*
* To deal with this issue, I decided to apply a paradigm shift to the way
* metadata is written to disk.
*
- * With this set of changes, only the metadata cache on process 0 is able
- * to write metadata to disk, although metadata caches on all other
+ * With this set of changes, only the metadata cache on process 0 is able
+ * to write metadata to disk, although metadata caches on all other
* processes can read metadata from disk as before.
*
* To keep all the other caches from getting plugged up with dirty metadata,
* process 0 periodically broadcasts a list of entries that it has flushed
* since that last notice, and which are currently clean. The other caches
- * mark these entries as clean as well, which allows them to evict the
+ * mark these entries as clean as well, which allows them to evict the
* entries as needed.
*
* One obvious problem in this approach is synchronizing the broadcasts
- * and receptions, as different caches may see different amounts of
- * activity.
+ * and receptions, as different caches may see different amounts of
+ * activity.
*
- * The current solution is for the caches to track the number of bytes
- * of newly generated dirty metadata, and to broadcast and receive
+ * The current solution is for the caches to track the number of bytes
+ * of newly generated dirty metadata, and to broadcast and receive
* whenever this value exceeds some user specified threshold.
*
* Maintaining this count is easy for all processes not on process 0 --
- * all that is necessary is to add the size of the entry to the total
+ * all that is necessary is to add the size of the entry to the total
* whenever there is an insertion, a rename of a previously clean entry,
* or whever a previously clean entry is marked dirty in an unprotect.
*
* On process 0, we have to be careful not to count dirty bytes twice.
- * If an entry is marked dirty, flushed, and marked dirty again, all
- * within a single reporting period, it only th first marking should
- * be added to the dirty bytes generated tally, as that is all that
+ * If an entry is marked dirty, flushed, and marked dirty again, all
+ * within a single reporting period, it only th first marking should
+ * be added to the dirty bytes generated tally, as that is all that
* the other processes will see.
*
* At present, this structure exists to maintain the fields needed to
* implement the above scheme, and thus is only used in the parallel
* case. However, other uses may arise in the future.
*
- * Instance of this structure are associated with metadata caches via
- * the aux_ptr field of H5C_t (see H5Cpkg.h). The H5AC code is
+ * Instance of this structure are associated with metadata caches via
+ * the aux_ptr field of H5C_t (see H5Cpkg.h). The H5AC code is
* responsible for allocating, maintaining, and discarding instances
- * of H5AC_aux_t.
+ * of H5AC_aux_t.
*
* The remainder of this header comments documents the individual fields
* of the structure.
*
* JRM - 6/27/05
*
- * magic: Unsigned 32 bit integer always set to
- * H5AC__H5AC_AUX_T_MAGIC. This field is used to validate
+ * magic: Unsigned 32 bit integer always set to
+ * H5AC__H5AC_AUX_T_MAGIC. This field is used to validate
* pointers to instances of H5AC_aux_t.
*
* mpi_comm: MPI communicator associated with the file for which the
@@ -146,14 +146,14 @@
* mpi_size: Number of processes in mpi_comm.
*
* write_permitted: Boolean flag used to control whether the cache
- * is permitted to write to file.
+ * is permitted to write to file.
*
- * dirty_bytes_threshold: Integer field containing the dirty bytes
- * generation threashold. Whenever dirty byte creation
- * exceeds this value, the metadata cache on process 0
+ * dirty_bytes_threshold: Integer field containing the dirty bytes
+ * generation threashold. Whenever dirty byte creation
+ * exceeds this value, the metadata cache on process 0
* broadcasts a list of the entries it has flushed since
* the last broadcast (or since the beginning of execution)
- * and which are currently clean (if they are still in the
+ * and which are currently clean (if they are still in the
* cache)
*
* Similarly, metadata caches on processes other than process
@@ -161,16 +161,16 @@
* the threshold is exceeded.
*
* dirty_bytes: Integer field containing the number of bytes of dirty
- * metadata generated since the beginning of the computation,
- * or (more typically) since the last clean entries list
+ * metadata generated since the beginning of the computation,
+ * or (more typically) since the last clean entries list
* broadcast. This field is reset to zero after each such
* broadcast.
*
- * dirty_bytes_propagations: This field only exists when the
+ * dirty_bytes_propagations: This field only exists when the
* H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE.
*
* It is used to track the number of times the cleaned list
- * has been propagated from process 0 to the other
+ * has been propagated from process 0 to the other
* processes.
*
* unprotect_dirty_bytes: This field only exists when the
@@ -184,7 +184,7 @@
* H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE.
*
* It is used to track the number of times dirty bytes have
- * been created via unprotect operations since the last time
+ * been created via unprotect operations since the last time
* the cleaned list was propagated.
*
* insert_dirty_bytes: This field only exists when the
@@ -198,7 +198,7 @@
* H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE.
*
* It is used to track the number of times dirty bytes have
- * been created via insert operations since the last time
+ * been created via insert operations since the last time
* the cleaned list was propagated.
*
* rename_dirty_bytes: This field only exists when the
@@ -212,7 +212,7 @@
* H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE.
*
* It is used to track the number of times dirty bytes have
- * been created via rename operations since the last time
+ * been created via rename operations since the last time
* the cleaned list was propagated.
*
* d_slist_ptr: Pointer to an instance of H5SL_t used to maintain a list
@@ -231,36 +231,36 @@
* 2) a previously clean entry is renamed, and it does not
* already appear in the dirty entry list, or
*
- * 3) a previously clean entry is unprotected with the
- * dirtied flag set and the entry does not already appear
+ * 3) a previously clean entry is unprotected with the
+ * dirtied flag set and the entry does not already appear
* in the dirty entry list.
*
* Entries are added to the dirty entry list whever they cause
- * the dirty bytes count to be increased. They are removed
+ * the dirty bytes count to be increased. They are removed
* when they appear in a clean entries broadcast. Note that
* renames must be reflected in the dirty entry list.
*
- * To reitterate, this field is only used on process 0 -- it
+ * To reitterate, this field is only used on process 0 -- it
* should be NULL on all other processes.
*
- * d_slist_len: Integer field containing the number of entries in the
- * dirty entry list. This field should always contain the
+ * d_slist_len: Integer field containing the number of entries in the
+ * dirty entry list. This field should always contain the
* value 0 on all processes other than process 0. It exists
* primarily for sanity checking.
*
- * c_slist_ptr: Pointer to an instance of H5SL_t used to maintain a list
+ * c_slist_ptr: Pointer to an instance of H5SL_t used to maintain a list
* of entries that were dirty, have been flushed
* to disk since the last clean entries broadcast, and are
* still clean. Since only process 0 can write to disk, this
* list only exists on process 0.
*
* In essence, this slist is used to assemble the contents of
- * the next clean entries broadcast. The list emptied after
+ * the next clean entries broadcast. The list emptied after
* each broadcast.
- *
+ *
* c_slist_len: Integer field containing the number of entries in the clean
- * entries list (*c_slist_ptr). This field should always
- * contain the value 0 on all processes other than process 0.
+ * entries list (*c_slist_ptr). This field should always
+ * contain the value 0 on all processes other than process 0.
* It exists primarily for sanity checking.
*
* write_done: In the parallel test bed, it is necessary to ensure that
@@ -297,7 +297,7 @@ typedef struct H5AC_aux_t
int32_t dirty_bytes;
-#if H5AC_DEBUG_DIRTY_BYTES_CREATION
+#if H5AC_DEBUG_DIRTY_BYTES_CREATION
int32_t dirty_bytes_propagations;
diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h
index 6dcd88c..c3433be 100644
--- a/src/H5ACprivate.h
+++ b/src/H5ACprivate.h
@@ -229,7 +229,7 @@ extern hid_t H5AC_ind_dxpl_id;
#define H5AC__FLUSH_MARKED_ENTRIES_FLAG H5C__FLUSH_MARKED_ENTRIES_FLAG
-/* #defines of flags used to report entry status in the
+/* #defines of flags used to report entry status in the
* H5AC_get_entry_status() call.
*/
@@ -261,7 +261,7 @@ H5_DLL herr_t H5AC_mark_pinned_entry_dirty(H5F_t * f,
void * thing,
hbool_t size_changed,
size_t new_size);
-H5_DLL herr_t H5AC_mark_pinned_or_protected_entry_dirty(H5F_t * f,
+H5_DLL herr_t H5AC_mark_pinned_or_protected_entry_dirty(H5F_t * f,
void * thing);
H5_DLL herr_t H5AC_rename(H5F_t *f, const H5AC_class_t *type,
haddr_t old_addr, haddr_t new_addr);
diff --git a/src/H5ACpublic.h b/src/H5ACpublic.h
index 5db8c5a..81d3319 100644
--- a/src/H5ACpublic.h
+++ b/src/H5ACpublic.h
@@ -40,16 +40,16 @@ extern "C" {
* structure H5AC_cache_config_t
*
* H5AC_cache_config_t is a public structure intended for use in public APIs.
- * At least in its initial incarnation, it is basicaly a copy of struct
- * H5C_auto_size_ctl_t, minus the report_fcn field, and plus the
- * dirty_bytes_threshold field.
+ * At least in its initial incarnation, it is basicaly a copy of struct
+ * H5C_auto_size_ctl_t, minus the report_fcn field, and plus the
+ * dirty_bytes_threshold field.
*
- * The report_fcn field is omitted, as including it would require us to
+ * The report_fcn field is omitted, as including it would require us to
* make H5C_t structure public.
*
* The dirty_bytes_threshold field does not appear in H5C_auto_size_ctl_t,
* as synchronization between caches on different processes is handled at
- * the H5AC level, not at the level of H5C. Note however that there is
+ * the H5AC level, not at the level of H5C. Note however that there is
* considerable interaction between this value and the other fields in this
* structure.
*
@@ -235,31 +235,31 @@ extern "C" {
* The value of this field must be in the range [0.0, 1.0]. I would
* expect typical values to be in the range of 0.01 to 0.1.
*
- *
+ *
* Parallel Configuration Fields:
*
* In PHDF5, all operations that modify metadata must be executed collectively.
- * We used to think that this was enough to ensure consistency across the
+ * We used to think that this was enough to ensure consistency across the
* metadata caches, but since we allow processes to read metadata individually,
- * the order of dirty entries in the LRU list can vary across processes,
+ * the order of dirty entries in the LRU list can vary across processes,
* which can result in inconsistencies between the caches.
*
- * To prevent this, only the metadata cache on process 0 is allowed to write
+ * To prevent this, only the metadata cache on process 0 is allowed to write
* to file, and then only after synchronizing with the other caches. After
* it writes entries to file, it sends the base addresses of the now clean
* entries to the other caches, so they can mark these entries clean as well.
*
- * The different caches know when to synchronize caches by counting the
+ * The different caches know when to synchronize caches by counting the
* number of bytes of dirty metadata created by the collective operations
- * modifying metadata. Whenever this count exceeds a user specified
- * threshold (see below), process 0 flushes down to its minimum clean size,
+ * modifying metadata. Whenever this count exceeds a user specified
+ * threshold (see below), process 0 flushes down to its minimum clean size,
* and then sends the list of newly cleaned entries to the other caches.
*
- * dirty_bytes_threshold: Threshold of dirty byte creation used to
- * synchronize updates between caches. (See above for outline and
+ * dirty_bytes_threshold: Threshold of dirty byte creation used to
+ * synchronize updates between caches. (See above for outline and
* motivation.)
*
- * This value MUST be consistant across all processes accessing the
+ * This value MUST be consistant across all processes accessing the
* file. This field is ignored unless HDF5 has been compiled for
* parallel.
*
diff --git a/src/H5B2private.h b/src/H5B2private.h
index 537921f..6f90ef6 100644
--- a/src/H5B2private.h
+++ b/src/H5B2private.h
@@ -98,7 +98,7 @@ typedef struct H5B2_class_t {
/*****************************/
/* Library-private Variables */
/*****************************/
-
+
/***************************************/
/* Library-private Function Prototypes */
/***************************************/
diff --git a/src/H5Bprivate.h b/src/H5Bprivate.h
index 246d0c5..c94d1a6 100644
--- a/src/H5Bprivate.h
+++ b/src/H5Bprivate.h
@@ -134,7 +134,7 @@ typedef struct H5B_class_t {
/*****************************/
/* Library-private Variables */
/*****************************/
-
+
/* Declare a free list to manage the H5B_shared_t struct */
H5FL_EXTERN(H5B_shared_t);
diff --git a/src/H5C.c b/src/H5C.c
index 15fc671..dd0d56d 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -201,11 +201,11 @@
* JRM - 1/10/05
*
* - Added the H5C__DLL_UPDATE_FOR_SIZE_CHANGE macro and the associated
- * sanity checking macros. These macro are used to update the size of
+ * sanity checking macros. These macro are used to update the size of
* a DLL when one of its entries changes size.
*
* JRM - 9/8/05
- *
+ *
****************************************************************************/
#if H5C_DO_SANITY_CHECKS
@@ -294,7 +294,7 @@ if ( ( (new_size) > (dll_size) ) || \
#define H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv)
#define H5C__DLL_SC(head_ptr, tail_ptr, len, Size, fv)
#define H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv)
-#define H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size)
+#define H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size)
#define H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size)
#endif /* H5C_DO_SANITY_CHECKS */
@@ -1174,9 +1174,9 @@ if ( ( (cache_ptr) == NULL ) || \
* Macro: H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS
*
* Purpose: For efficiency, we sometimes change the order of flushes --
- * but doing so can confuse the replacement policy. This
- * macro exists to allow us to specify an entry as the
- * most recently touched so we can repair any such
+ * but doing so can confuse the replacement policy. This
+ * macro exists to allow us to specify an entry as the
+ * most recently touched so we can repair any such
* confusion.
*
* At present, we only support the modified LRU policy, so
@@ -1192,7 +1192,7 @@ if ( ( (cache_ptr) == NULL ) || \
*
* JRM -- 3/20/06
* Modified macro to ignore pinned entries. Pinned entries
- * do not appear in the data structures maintained by the
+ * do not appear in the data structures maintained by the
* replacement policy code, and thus this macro has nothing
* to do if called for such an entry.
*
@@ -1831,7 +1831,7 @@ if ( ( (cache_ptr) == NULL ) || \
* JRM - 3/17/06
* Modified macro to do nothing if the entry is pinned.
* In this case, the entry is on the pinned entry list, not
- * in the replacement policy data structures, so there is
+ * in the replacement policy data structures, so there is
* nothing to be done.
*
*-------------------------------------------------------------------------
@@ -1947,7 +1947,7 @@ if ( ( (cache_ptr) == NULL ) || \
* unpin of the specified cache entry.
*
* To do this, unlink the specified entry from the protected
- * entry list, and re-insert it in the data structures used
+ * entry list, and re-insert it in the data structures used
* by the current replacement policy.
*
* At present, we only support the modified LRU policy, so
@@ -2271,7 +2271,7 @@ static herr_t H5C_make_space_in_cache(H5F_t * f,
hbool_t * first_flush_ptr);
#if H5C_DO_EXTREME_SANITY_CHECKS
static herr_t H5C_validate_lru_list(H5C_t * cache_ptr);
-static herr_t H5C_verify_not_in_index(H5C_t * cache_ptr,
+static herr_t H5C_verify_not_in_index(H5C_t * cache_ptr,
H5C_cache_entry_t * entry_ptr);
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -2444,7 +2444,7 @@ done:
* size control data structures.
*
* JRM -- 6/24/05
- * Added support for the new write_permitted field of
+ * Added support for the new write_permitted field of
* the H5C_t structure.
*
* JRM -- 7/5/05
@@ -2683,7 +2683,7 @@ done:
* ageout method of cache size reduction.
*
* JRM -- 1/19/06
- * Updated function for display the new prefix field of
+ * Updated function for display the new prefix field of
* H5C_t in output.
*
*-------------------------------------------------------------------------
@@ -2705,8 +2705,8 @@ H5C_def_auto_resize_rpt_fcn(H5C_t * cache_ptr,
switch ( status )
{
case in_spec:
- HDfprintf(stdout,
- "%sAuto cache resize -- no change. (hit rate = %lf)\n",
+ HDfprintf(stdout,
+ "%sAuto cache resize -- no change. (hit rate = %lf)\n",
cache_ptr->prefix, hit_rate);
break;
@@ -2714,9 +2714,9 @@ H5C_def_auto_resize_rpt_fcn(H5C_t * cache_ptr,
HDassert( hit_rate < (cache_ptr->resize_ctl).lower_hr_threshold );
HDassert( old_max_cache_size < new_max_cache_size );
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
- cache_ptr->prefix, hit_rate,
+ cache_ptr->prefix, hit_rate,
(cache_ptr->resize_ctl).lower_hr_threshold);
HDfprintf(stdout,
@@ -2749,7 +2749,7 @@ H5C_def_auto_resize_rpt_fcn(H5C_t * cache_ptr,
case H5C_decr__age_out:
HDfprintf(stdout,
- "%sAuto cache resize -- decrease by ageout. HR = %lf\n",
+ "%sAuto cache resize -- decrease by ageout. HR = %lf\n",
cache_ptr->prefix, hit_rate);
break;
@@ -2765,7 +2765,7 @@ H5C_def_auto_resize_rpt_fcn(H5C_t * cache_ptr,
default:
HDfprintf(stdout,
- "%sAuto cache resize -- decrease by unknown mode. HR = %lf\n",
+ "%sAuto cache resize -- decrease by unknown mode. HR = %lf\n",
cache_ptr->prefix, hit_rate);
}
@@ -2779,43 +2779,43 @@ H5C_def_auto_resize_rpt_fcn(H5C_t * cache_ptr,
break;
case at_max_size:
- HDfprintf(stdout,
- "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
- cache_ptr->prefix, hit_rate,
+ HDfprintf(stdout,
+ "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
+ cache_ptr->prefix, hit_rate,
(cache_ptr->resize_ctl).lower_hr_threshold);
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s cache already at maximum size so no change.\n",
cache_ptr->prefix);
break;
case at_min_size:
- HDfprintf(stdout,
- "%sAuto cache resize -- hit rate (%lf) -- can't decrease.\n",
+ HDfprintf(stdout,
+ "%sAuto cache resize -- hit rate (%lf) -- can't decrease.\n",
cache_ptr->prefix, hit_rate);
- HDfprintf(stdout, "%s cache already at minimum size.\n",
+ HDfprintf(stdout, "%s cache already at minimum size.\n",
cache_ptr->prefix);
break;
case increase_disabled:
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%sAuto cache resize -- increase disabled -- HR = %lf.",
cache_ptr->prefix, hit_rate);
break;
case decrease_disabled:
- HDfprintf(stdout,
- "%sAuto cache resize -- decrease disabled -- HR = %lf.\n",
+ HDfprintf(stdout,
+ "%sAuto cache resize -- decrease disabled -- HR = %lf.\n",
cache_ptr->prefix, hit_rate);
break;
case not_full:
HDassert( hit_rate < (cache_ptr->resize_ctl).lower_hr_threshold );
- HDfprintf(stdout,
- "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
- cache_ptr->prefix, hit_rate,
+ HDfprintf(stdout,
+ "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
+ cache_ptr->prefix, hit_rate,
(cache_ptr->resize_ctl).lower_hr_threshold);
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s cache not full so no increase in size.\n",
cache_ptr->prefix);
break;
@@ -3004,7 +3004,7 @@ done:
* the marked entries.
*
* JRM -- 10/15/05
- * Added code supporting the new
+ * Added code supporting the new
* H5C__FLUSH_IGNORE_PROTECTED_FLAG. We need this flag, as
 * we now use this function to flush a large number of entries
* in increasing address order. We do this by marking the
@@ -3012,10 +3012,10 @@ done:
* and then restoring LRU order.
*
* However, it is possible that the cache will contain other,
- * unmarked protected entries, when we make this call. This
+ * unmarked protected entries, when we make this call. This
* new flag allows us to ignore them.
*
- * Note that even with this flag set, it is still an error
+ * Note that even with this flag set, it is still an error
* to try to flush a protected entry.
*
 * JRM -- 3/25/06
@@ -3094,9 +3094,9 @@ H5C_flush_cache(H5F_t * f,
node_ptr = H5SL_first(cache_ptr->slist_ptr);
#if H5C_DO_SANITY_CHECKS
- /* H5C_flush_single_entry() now removes dirty entries from the
+ /* H5C_flush_single_entry() now removes dirty entries from the
* slist as it flushes them. Thus for sanity checks we must
- * make note of the initial slist length and size before we
+ * make note of the initial slist length and size before we
* do any flushes.
*/
initial_slist_len = cache_ptr->slist_len;
@@ -3126,8 +3126,8 @@ H5C_flush_cache(H5F_t * f,
if ( entry_ptr->is_protected ) {
- /* we probably have major problems -- but lets flush
- * everything we can before we decide whether to flag
+ /* we probably have major problems -- but lets flush
+ * everything we can before we decide whether to flag
* an error.
*/
tried_to_flush_protected_entry = TRUE;
@@ -3169,7 +3169,7 @@ H5C_flush_cache(H5F_t * f,
HDassert( protected_entries <= cache_ptr->pl_len );
- if ( ( ( cache_ptr->pl_len > 0 ) && ( !ignore_protected ) )
+ if ( ( ( cache_ptr->pl_len > 0 ) && ( !ignore_protected ) )
||
( tried_to_flush_protected_entry ) ) {
@@ -3191,16 +3191,16 @@ done:
 * Purpose:     Flush dirty entries until the cache's min clean size is
* attained.
*
- * This function is used in the implementation of the
- * metadata cache in PHDF5. To avoid "messages from the
- * future", the cache on process 0 can't be allowed to
+ * This function is used in the implementation of the
+ * metadata cache in PHDF5. To avoid "messages from the
+ * future", the cache on process 0 can't be allowed to
* flush entries until the other processes have reached
* the same point in the calculation. If this constraint
* is not met, it is possible that the other processes will
- * read metadata generated at a future point in the
+ * read metadata generated at a future point in the
* computation.
*
- *
+ *
* Return: Non-negative on success/Negative on failure or if
* write is not permitted.
*
@@ -3214,9 +3214,9 @@ done:
* upon return.
*
* Do this by scanning up the dirty LRU list for entries to
- * flush to reach min clean size, setting their flush_marker
- * flags, and recording their addresses in the order
- * encountered.
+ * flush to reach min clean size, setting their flush_marker
+ * flags, and recording their addresses in the order
+ * encountered.
*
* Then call H5C_flush_cache() to flush the marked entries.
*
@@ -3228,13 +3228,13 @@ done:
 *		This change had the opposite of the desired effect. Let's
 *		leave it in (albeit commented out for now). If we can't
 *		find a case where it helps, let's get rid of it.
- *
+ *
*-------------------------------------------------------------------------
*/
herr_t
-H5C_flush_to_min_clean(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
+H5C_flush_to_min_clean(H5F_t * f,
+ hid_t primary_dxpl_id,
+ hid_t secondary_dxpl_id,
H5C_t * cache_ptr)
{
herr_t result;
@@ -3277,7 +3277,7 @@ H5C_flush_to_min_clean(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"cache write is not permitted!?!\n");
}
-#if 1 /* original code */
+#if 1 /* original code */
result = H5C_make_space_in_cache(f,
primary_dxpl_id,
secondary_dxpl_id,
@@ -3291,17 +3291,17 @@ H5C_flush_to_min_clean(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"H5C_make_space_in_cache failed.")
}
-#else /* modified code -- commented out for now */
+#else /* modified code -- commented out for now */
if ( cache_ptr->max_cache_size > cache_ptr->index_size ) {
- if ( ((cache_ptr->max_cache_size - cache_ptr->index_size) +
+ if ( ((cache_ptr->max_cache_size - cache_ptr->index_size) +
cache_ptr->cLRU_list_size) >= cache_ptr->min_clean_size ) {
space_needed = 0;
} else {
- space_needed = cache_ptr->min_clean_size -
+ space_needed = cache_ptr->min_clean_size -
((cache_ptr->max_cache_size - cache_ptr->index_size) +
cache_ptr->cLRU_list_size);
}
@@ -3313,7 +3313,7 @@ H5C_flush_to_min_clean(H5F_t * f,
} else {
- space_needed = cache_ptr->min_clean_size -
+ space_needed = cache_ptr->min_clean_size -
cache_ptr->cLRU_list_size;
}
}
@@ -3360,7 +3360,7 @@ H5C_flush_to_min_clean(H5F_t * f,
( flushed_entries_size < space_needed ) ) {
HDfprintf(stdout, "flushed_entries_count = %d <= %d = slist_size\n",
(int)flushed_entries_count, (int)(cache_ptr->slist_size));
- HDfprintf(stdout,
+ HDfprintf(stdout,
"flushed_entries_size = %d < %d = space_needed.\n",
(int)flushed_entries_size, (int)space_needed);
}
@@ -3370,7 +3370,7 @@ H5C_flush_to_min_clean(H5F_t * f,
/* Flush the marked entries */
- result = H5C_flush_cache(f, primary_dxpl_id, secondary_dxpl_id,
+ result = H5C_flush_cache(f, primary_dxpl_id, secondary_dxpl_id,
cache_ptr, H5C__FLUSH_MARKED_ENTRIES_FLAG |
H5C__FLUSH_IGNORE_PROTECTED_FLAG);
@@ -3380,12 +3380,12 @@ H5C_flush_to_min_clean(H5F_t * f,
}
/* Now touch up the LRU list so as to place the flushed entries in
- * the order they they would be in if we had flushed them in the
+ * the order they they would be in if we had flushed them in the
* order we encountered them in.
*/
i = 0;
- while ( i < flushed_entries_count )
+ while ( i < flushed_entries_count )
{
H5C__SEARCH_INDEX_NO_STATS(cache_ptr, flushed_entries_list[i], \
entry_ptr, FAIL)
@@ -3640,7 +3640,7 @@ H5C_get_entry_status(H5C_t * cache_ptr,
if ( entry_ptr == NULL ) {
- /* the entry doesn't exist in the cache -- report this
+ /* the entry doesn't exist in the cache -- report this
* and quit.
*/
*in_cache_ptr = FALSE;
@@ -3726,7 +3726,7 @@ done:
* is_dirty field of H5C_cache_entry_t into the H5C code.
*
* JRM -- 6/24/05
- * Added support for the new write_permitted field of
+ * Added support for the new write_permitted field of
* the H5C_t structure.
*
* JRM -- 3/16/06
@@ -3734,7 +3734,7 @@ done:
* H5C_cache_entry_t structure.
*
* JRM -- 5/3/06
- * Added initialization for the new dirtied field of the
+ * Added initialization for the new dirtied field of the
* H5C_cache_entry_t structure.
*
*-------------------------------------------------------------------------
@@ -3769,16 +3769,16 @@ H5C_insert_entry(H5F_t * f,
HDassert( H5F_addr_defined(addr) );
HDassert( thing );
-#if H5C_DO_EXTREME_SANITY_CHECKS
+#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_verify_not_in_index(cache_ptr, (H5C_cache_entry_t *)thing) < 0 ) {
-
+
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "thing already in index.\n");
}
#endif /* H5C_DO_SANITY_CHECKS */
-#if H5C_DO_EXTREME_SANITY_CHECKS
+#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
+
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"LRU sanity check failed.\n");
}
@@ -3943,7 +3943,7 @@ H5C_insert_entry(H5F_t * f,
#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
+
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"LRU sanity check failed.\n");
}
@@ -3971,9 +3971,9 @@ done:
* Function: H5C_mark_entries_as_clean
*
* Purpose: When the H5C code is used to implement the metadata caches
- * in PHDF5, only the cache with MPI_rank 0 is allowed to
+ * in PHDF5, only the cache with MPI_rank 0 is allowed to
* actually write entries to disk -- all other caches must
- * retain dirty entries until they are advised that the
+ * retain dirty entries until they are advised that the
* entries are clean.
*
* This function exists to allow the H5C code to receive these
@@ -3981,13 +3981,13 @@ done:
*
* The function receives a list of entry base addresses
* which must refer to dirty entries in the cache. If any
- * of the entries are either clean or don't exist, the
+ * of the entries are either clean or don't exist, the
* function flags an error.
*
* The function scans the list of entries and flushes all
- * those that are currently unprotected with the
+ * those that are currently unprotected with the
* H5C__FLUSH_CLEAR_ONLY_FLAG. Those that are currently
- * protected are flagged for clearing when they are
+ * protected are flagged for clearing when they are
* unprotected.
*
* Return: Non-negative on success/Negative on failure
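A compressed sketch of how such a clean advisory might be processed follows; the lookup_entry and clear_entry helpers stand in for the cache's hash-table search and clear-only flush and are assumptions of the example, not H5C routines.

#include <stddef.h>

typedef struct centry {
    int is_dirty;
    int is_protected;
    int clear_on_unprotect;
} centry;

extern centry *lookup_entry(unsigned long addr);   /* assumed hash-table lookup */
extern int     clear_entry(centry *ep);            /* assumed clear-only flush  */

/* Mark the listed entries clean; defer protected ones to their unprotect call. */
static int
process_clean_advisory(const unsigned long *addrs, size_t n)
{
    size_t i;

    for (i = 0; i < n; i++) {
        centry *ep = lookup_entry(addrs[i]);

        if (ep == NULL || !ep->is_dirty)
            return -1;                    /* every address must name a dirty entry */

        if (ep->is_protected)
            ep->clear_on_unprotect = 1;   /* clear it later, at unprotect time     */
        else if (clear_entry(ep) < 0)     /* clear it now -- no write to disk      */
            return -1;
    }

    return 0;
}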
@@ -4048,7 +4048,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
+
HDassert(0);
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"LRU sanity check failed.\n");
@@ -4080,7 +4080,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
+
HDassert(0);
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"LRU sanity check failed.\n");
@@ -4094,7 +4094,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
if ( entry_ptr == NULL ) {
#if H5C_DO_SANITY_CHECKS
- HDfprintf(stdout,
+ HDfprintf(stdout,
"H5C_mark_entries_as_clean: entry[%d] = %ld not in cache.\n",
(int)i,
(long)addr);
@@ -4105,7 +4105,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
} else if ( ! entry_ptr->is_dirty ) {
#if H5C_DO_SANITY_CHECKS
- HDfprintf(stdout,
+ HDfprintf(stdout,
"H5C_mark_entries_as_clean: entry %ld is not dirty!?!\n",
(long)addr);
#endif /* H5C_DO_SANITY_CHECKS */
@@ -4131,7 +4131,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.")
}
}
-#else /* modified code */
+#else /* modified code */
} else {
/* Mark the entry to be cleared on unprotect. We will
* scan the LRU list shortly, and clear all those entries
@@ -4156,12 +4156,12 @@ H5C_mark_entries_as_clean(H5F_t * f,
#endif /* end modified code */
}
#if 1 /* modified code */
- /* Scan through the LRU list from back to front, and flush the
+ /* Scan through the LRU list from back to front, and flush the
* entries whose clear_on_unprotect flags are set. Observe that
* any protected entries will not be on the LRU, and therefore
* will not be flushed at this time.
*/
-
+
entries_cleared = 0;
entries_examined = 0;
initial_list_len = cache_ptr->LRU_list_len;
@@ -4169,7 +4169,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
while ( ( entry_ptr != NULL ) &&
( entries_examined <= initial_list_len ) &&
- ( entries_cleared < ce_array_len ) )
+ ( entries_cleared < ce_array_len ) )
{
if ( entry_ptr->clear_on_unprotect ) {
@@ -4239,13 +4239,13 @@ H5C_mark_entries_as_clean(H5F_t * f,
HDassert( entries_cleared + protected_entries_marked == ce_array_len );
#endif /* H5C_DO_SANITY_CHECKS */
- HDassert( ( entries_cleared == ce_array_len ) ||
+ HDassert( ( entries_cleared == ce_array_len ) ||
( (ce_array_len - entries_cleared) <= cache_ptr->pl_len ) );
#if H5C_DO_SANITY_CHECKS
i = 0;
entry_ptr = cache_ptr->pl_head_ptr;
- while ( entry_ptr != NULL )
+ while ( entry_ptr != NULL )
{
if ( entry_ptr->clear_on_unprotect ) {
@@ -4261,7 +4261,7 @@ done:
#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
+
HDassert(0);
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"LRU sanity check failed.\n");
@@ -4280,7 +4280,7 @@ done:
* Purpose: Mark a pinned entry as dirty. The target entry MUST be
 *		pinned, and MUST be unprotected.
*
- * If the entry has changed size, the function updates
+ * If the entry has changed size, the function updates
* data structures for the size change.
*
* If the entry is not already dirty, the function places
@@ -4374,16 +4374,16 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C_mark_pinned_or_protected_entry_dirty
*
- * Purpose: Mark a pinned or protected entry as dirty. The target entry
+ * Purpose: Mark a pinned or protected entry as dirty. The target entry
* MUST be either pinned or protected, and MAY be both.
*
 *		At present, this function does not support size change.
*
- * In the protected case, this call is the functional
+ * In the protected case, this call is the functional
* equivalent of setting the H5C__DIRTIED_FLAG on an unprotect
* call.
*
- * In the pinned but not protected case, if the entry is not
+ * In the pinned but not protected case, if the entry is not
 *		already dirty, the function marks the entry
* dirty and places it on the skip list.
*
@@ -4498,7 +4498,7 @@ H5C_rename_entry(H5C_t * cache_ptr,
#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
+
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"LRU sanity check failed.\n");
}
@@ -4575,7 +4575,7 @@ done:
#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
+
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"LRU sanity check failed.\n");
}
@@ -4589,7 +4589,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C_pin_protected_entry()
*
- * Purpose: Pin a protected cache entry. The entry must be protected
+ * Purpose: Pin a protected cache entry. The entry must be protected
* at the time of call, and must be unpinned.
*
* Return: Non-negative on success/Negative on failure
@@ -4703,7 +4703,7 @@ done:
* Hand optimizations.
*
* JRM -- 5/3/06
- * Added code to set the new dirtied field in
+ * Added code to set the new dirtied field in
* H5C_cache_entry_t to FALSE prior to return.
*
*-------------------------------------------------------------------------
@@ -4741,7 +4741,7 @@ H5C_protect(H5F_t * f,
#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
+
HDassert(0);
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, \
"LRU sanity check failed.\n");
@@ -4966,7 +4966,7 @@ done:
#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
+
HDassert(0);
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, \
"LRU sanity check failed.\n");
@@ -5273,7 +5273,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C_set_prefix
*
- * Purpose: Set the values of the prefix field of H5C_t. This
+ * Purpose: Set the values of the prefix field of H5C_t. This
 *		field is used to label some debugging output.
*
* Return: Non-negative on success/Negative on failure
@@ -5374,11 +5374,11 @@ done:
* Updated function for the addition of the hash table.
*
* JRM -- 9/8/05
- * Updated function for the addition of cache entry size
+ * Updated function for the addition of cache entry size
* change statistics.
*
* JRM -- 1/13/06
- * Added code to use the prefix field of H5C_t to allow
+ * Added code to use the prefix field of H5C_t to allow
* tagging of statistics output.
*
* JRM -- 3/21/06
@@ -5502,7 +5502,7 @@ H5C_stats(H5C_t * cache_ptr,
HDfprintf(stdout, "\n");
HDfprintf(stdout,
- "%s hash table insertion / deletions = %ld / %ld\n",
+ "%s hash table insertion / deletions = %ld / %ld\n",
cache_ptr->prefix,
(long)(cache_ptr->total_ht_insertions),
(long)(cache_ptr->total_ht_deletions));
@@ -5550,7 +5550,7 @@ H5C_stats(H5C_t * cache_ptr,
(long)(cache_ptr->max_pel_size),
(long)(cache_ptr->pel_len),
(long)(cache_ptr->max_pel_len));
-
+
HDfprintf(stdout,
"%s current LRU list size / length = %ld / %ld\n",
cache_ptr->prefix,
@@ -5593,7 +5593,7 @@ H5C_stats(H5C_t * cache_ptr,
(long)total_size_increases,
(long)total_size_decreases);
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s Total entry pins (dirty) / unpins = %ld (%ld) / %ld\n",
cache_ptr->prefix,
(long)total_pins,
@@ -5663,19 +5663,19 @@ H5C_stats(H5C_t * cache_ptr,
(long)(cache_ptr->insertions[i]),
(long)(cache_ptr->renames[i]));
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s size increases / decreases = %ld / %ld\n",
cache_ptr->prefix,
(long)(cache_ptr->size_increases[i]),
(long)(cache_ptr->size_decreases[i]));
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s entry pins / unpins = %ld / %ld\n",
cache_ptr->prefix,
(long)(cache_ptr->pins[i]),
(long)(cache_ptr->unpins[i]));
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s entry dirty pins/pin'd flushes = %ld / %ld\n",
cache_ptr->prefix,
(long)(cache_ptr->dirty_pins[i]),
@@ -5812,7 +5812,7 @@ H5C_stats__reset(H5C_t * cache_ptr)
/*-------------------------------------------------------------------------
* Function: H5C_unpin_entry()
*
- * Purpose: Unpin a cache entry. The entry must be unprotected at
+ * Purpose: Unpin a cache entry. The entry must be unprotected at
* the time of call, and must be pinned.
*
* Return: Non-negative on success/Negative on failure
@@ -5945,7 +5945,7 @@ done:
* the new H5C__PIN_ENTRY_FLAG and H5C__UNPIN_ENTRY_FLAG flags.
*
* JRM -- 5/3/06
- * Added code to make use of the new dirtied field in
+ * Added code to make use of the new dirtied field in
* H5C_cache_entry_t. If this field is TRUE, it is the
* equivalent of setting the H5C__DIRTIED_FLAG.
*
@@ -6002,27 +6002,27 @@ H5C_unprotect(H5F_t * f,
HDassert( entry_ptr->addr == addr );
HDassert( entry_ptr->type == type );
- /* also set the dirtied variable if the dirtied field is set in
+ /* also set the dirtied variable if the dirtied field is set in
* the entry.
*/
dirtied |= entry_ptr->dirtied;
#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
+
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"LRU sanity check failed.\n");
}
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
#ifdef H5_HAVE_PARALLEL
- /* When the H5C code is used to implement the metadata cache in the
+ /* When the H5C code is used to implement the metadata cache in the
* PHDF5 case, only the cache on process 0 is allowed to write to file.
* All the other metadata caches must hold dirty entries until they
- * are told that the entries are clean.
+ * are told that the entries are clean.
*
- * The clear_on_unprotect flag in the H5C_cache_entry_t structure
- * exists to deal with the case in which an entry is protected when
+ * The clear_on_unprotect flag in the H5C_cache_entry_t structure
+ * exists to deal with the case in which an entry is protected when
* its cache receives word that the entry is now clean. In this case,
* the clear_on_unprotect flag is set, and the entry is flushed with
* the H5C__FLUSH_CLEAR_ONLY_FLAG.
@@ -6198,14 +6198,14 @@ H5C_unprotect(H5F_t * f,
"hash table contains multiple entries for addr?!?.")
}
- if ( H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
+ if ( H5C_flush_single_entry(f,
+ primary_dxpl_id,
+ secondary_dxpl_id,
cache_ptr,
- type,
- addr,
+ type,
+ addr,
H5C__FLUSH_CLEAR_ONLY_FLAG,
- &dummy_first_flush,
+ &dummy_first_flush,
TRUE) < 0 ) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't clear.")
@@ -6219,7 +6219,7 @@ done:
#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
+
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"LRU sanity check failed.\n");
}
@@ -7538,17 +7538,17 @@ done:
*
* If the cache contains protected entries, the function will
* fail, as protected entries cannot be either flushed or
- * destroyed. However all unprotected entries should be
+ * destroyed. However all unprotected entries should be
* flushed and destroyed before the function returns failure.
*
* While pinned entries can usually be flushed, they cannot
* be destroyed. However, they should be unpinned when all
* the entries that reference them have been destroyed (thus
 *		reducing the pinned entry's reference count to 0, allowing
- * it to be unpinned).
+ * it to be unpinned).
*
- * If pinned entries are present, the function makes repeated
- * passes through the cache, flushing all dirty entries
+ * If pinned entries are present, the function makes repeated
+ * passes through the cache, flushing all dirty entries
* (including the pinned dirty entries where permitted) and
* destroying all unpinned entries. This process is repeated
* until either the cache is empty, or the number of pinned
@@ -7625,7 +7625,7 @@ H5C_flush_invalidate_cache(H5F_t * f,
cur_pel_len = cache_ptr->pel_len;
old_pel_len = cache_ptr->pel_len;
- while ( ( first_pass ) ||
+ while ( ( first_pass ) ||
( ( cur_pel_len < old_pel_len ) && ( protected_entries == 0 ) ) )
{
have_pinned_entries = ( cur_pel_len > 0 );
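The loop condition above captures the retry policy: keep making passes while destroying entries is still unpinning others. Stripped of the cache details, the control flow looks roughly like the following; pinned_entry_count and flush_and_destroy_pass are placeholders for this sketch, not H5C functions.

#include <stddef.h>

extern size_t pinned_entry_count(void);        /* assumed: current pinned-list length */
extern int    flush_and_destroy_pass(void);    /* assumed: one pass over the cache    */

static int
flush_invalidate(void)
{
    size_t old_pel_len;
    size_t cur_pel_len = pinned_entry_count();

    do {
        old_pel_len = cur_pel_len;
        if (flush_and_destroy_pass() < 0)      /* flush dirty, destroy unpinned       */
            return -1;
        cur_pel_len = pinned_entry_count();
        /* Another pass is worthwhile only if something became unpinned. */
    } while (cur_pel_len > 0 && cur_pel_len < old_pel_len);

    return (cur_pel_len == 0) ? 0 : -1;        /* still-pinned entries mean failure   */
}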
@@ -7642,9 +7642,9 @@ H5C_flush_invalidate_cache(H5F_t * f,
node_ptr = H5SL_first(cache_ptr->slist_ptr);
#if H5C_DO_SANITY_CHECKS
- /* Depending on circumstances, H5C_flush_single_entry() will
- * remove dirty entries from the slist as it flushes them.
- * Thus for sanity checks we must make note of the initial
+ /* Depending on circumstances, H5C_flush_single_entry() will
+ * remove dirty entries from the slist as it flushes them.
+ * Thus for sanity checks we must make note of the initial
* slist length and size before we do any flushes.
*/
initial_slist_len = cache_ptr->slist_len;
@@ -7657,13 +7657,13 @@ H5C_flush_invalidate_cache(H5F_t * f,
{
/* Note that we now remove nodes from the slist as we flush
* the associated entries, instead of leaving them there
- * until we are done, and then destroying all nodes in
+ * until we are done, and then destroying all nodes in
* the slist.
*
* While this optimization is still easy if everything works,
- * the addition of pinned entries and multiple passes
+ * the addition of pinned entries and multiple passes
* through the cache to allow entries to unpin themselves
- * complicates error recover greatly.
+ * complicates error recover greatly.
*
 *		 Given these complications, I've decided to omit
* this optimization for now. It can be re-implemented
@@ -7687,7 +7687,7 @@ H5C_flush_invalidate_cache(H5F_t * f,
if ( entry_ptr->is_protected ) {
- /* we have major problems -- but lets flush
+ /* we have major problems -- but lets flush
* everything we can before we flag an error.
*/
protected_entries++;
@@ -7728,7 +7728,7 @@ H5C_flush_invalidate_cache(H5F_t * f,
cache_ptr,
NULL,
entry_ptr->addr,
- (cooked_flags |
+ (cooked_flags |
H5C__FLUSH_INVALIDATE_FLAG),
&first_flush,
TRUE);
@@ -7749,8 +7749,8 @@ H5C_flush_invalidate_cache(H5F_t * f,
#endif /* H5C_DO_SANITY_CHECKS */
/* Since we are doing a destroy, we must make a pass through
- * the hash table and try to flush - destroy all entries that
- * remain. Note that all remaining entries entries must be
+ * the hash table and try to flush - destroy all entries that
+ * remain. Note that all remaining entries entries must be
* clean, so this will not result in any writes to disk.
*/
for ( i = 0; i < H5C__HASH_TABLE_LEN; i++ )
@@ -7784,7 +7784,7 @@ H5C_flush_invalidate_cache(H5F_t * f,
cache_ptr,
NULL,
entry_ptr->addr,
- (cooked_flags |
+ (cooked_flags |
H5C__FLUSH_INVALIDATE_FLAG),
&first_flush,
TRUE);
@@ -7797,8 +7797,8 @@ H5C_flush_invalidate_cache(H5F_t * f,
"Clean entry flush destroy failed.")
}
}
- /* We can't do anything if the entry is pinned. The
- * hope is that the entry will be unpinned as the
+ /* We can't do anything if the entry is pinned. The
+ * hope is that the entry will be unpinned as the
* result of destroys of entries that reference it.
*
* We detect this by noting the change in the number
@@ -7811,7 +7811,7 @@ H5C_flush_invalidate_cache(H5F_t * f,
HDassert( protected_entries == cache_ptr->pl_len );
old_pel_len = cur_pel_len;
- cur_pel_len = cache_ptr->pel_len;
+ cur_pel_len = cache_ptr->pel_len;
first_pass = FALSE;
@@ -7898,9 +7898,9 @@ done:
* respectively.
*
* JRM -- 6/24/05
- * Added code to remove dirty entries from the slist after
- * they have been flushed. Also added a sanity check that
- * will scream if we attempt a write when writes are
+ * Added code to remove dirty entries from the slist after
+ * they have been flushed. Also added a sanity check that
+ * will scream if we attempt a write when writes are
* completely disabled.
*
* JRM -- 7/5/05
@@ -8233,7 +8233,7 @@ H5C_flush_single_entry(H5F_t * f,
if ( cache_ptr->log_flush ) {
- status = (cache_ptr->log_flush)(cache_ptr, addr, was_dirty,
+ status = (cache_ptr->log_flush)(cache_ptr, addr, was_dirty,
flags, type_id);
if ( status < 0 ) {
@@ -8491,7 +8491,7 @@ H5C_make_space_in_cache(H5F_t * f,
entry_ptr = cache_ptr->dLRU_tail_ptr;
if ( cache_ptr->index_size < cache_ptr->max_cache_size ) {
-
+
empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
} else {
@@ -8499,7 +8499,7 @@ H5C_make_space_in_cache(H5F_t * f,
empty_space = 0;
}
- while ( ( (cache_ptr->cLRU_list_size + empty_space)
+ while ( ( (cache_ptr->cLRU_list_size + empty_space)
< cache_ptr->min_clean_size ) &&
( entries_examined <= initial_list_len ) &&
( entry_ptr != NULL )
@@ -8587,7 +8587,7 @@ done:
*
* Purpose: Debugging function that scans the LRU list for errors.
*
- * If an error is detected, the function generates a
+ * If an error is detected, the function generates a
* diagnostic and returns FAIL. If no error is detected,
* the function returns SUCCEED.
*
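A check of this kind typically walks the list once, accumulating a length and byte count and verifying the links, then compares the totals against the cache's bookkeeping fields. A minimal sketch with stand-in types, not H5C's:

#include <stddef.h>

typedef struct lru_entry {
    size_t            size;
    struct lru_entry *next;
    struct lru_entry *prev;
} lru_entry;

/* Return 0 if the list matches the recorded length and size, -1 otherwise. */
static int
validate_lru(const lru_entry *head, const lru_entry *tail,
             size_t recorded_len, size_t recorded_size)
{
    const lru_entry *ep;
    size_t len = 0, size = 0;

    if ((head == NULL || tail == NULL) && head != tail)
        return -1;                              /* one end NULL but not the other */

    for (ep = head; ep != NULL; ep = ep->next) {
        if ((ep == head) != (ep->prev == NULL))
            return -1;                          /* only the head may lack a prev  */
        len++;
        size += ep->size;
    }

    return (len == recorded_len && size == recorded_size) ? 0 : -1;
}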
@@ -8615,10 +8615,10 @@ H5C_validate_lru_list(H5C_t * cache_ptr)
HDassert( cache_ptr );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- if ( ( ( cache_ptr->LRU_head_ptr == NULL )
- ||
- ( cache_ptr->LRU_tail_ptr == NULL )
- )
+ if ( ( ( cache_ptr->LRU_head_ptr == NULL )
+ ||
+ ( cache_ptr->LRU_tail_ptr == NULL )
+ )
&&
( cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr )
) {
@@ -8633,7 +8633,7 @@ H5C_validate_lru_list(H5C_t * cache_ptr)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
}
- if ( ( cache_ptr->LRU_list_len == 1 )
+ if ( ( cache_ptr->LRU_list_len == 1 )
&&
( ( cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr )
||
@@ -8688,7 +8688,7 @@ H5C_validate_lru_list(H5C_t * cache_ptr)
entry_ptr = entry_ptr->next;
}
- if ( ( cache_ptr->LRU_list_len != len ) ||
+ if ( ( cache_ptr->LRU_list_len != len ) ||
( cache_ptr->LRU_list_size != size ) ) {
HDfprintf(stdout,"H5C_validate_lru_list: Check 7 failed.\n");
@@ -8717,7 +8717,7 @@ done:
* that the specified instance of H5C_cache_entry_t is not
* present.
*
- * If an error is detected, the function generates a
+ * If an error is detected, the function generates a
* diagnostic and returns FAIL. If no error is detected,
* the function returns SUCCEED.
*
@@ -8756,7 +8756,7 @@ H5C_verify_not_in_index(H5C_t * cache_ptr,
{
if ( scan_ptr == entry_ptr ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"H5C_verify_not_in_index: entry in index (%d/%d)\n",
i, depth);
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
diff --git a/src/H5CS.c b/src/H5CS.c
index 3de842b..79db4ba 100644
--- a/src/H5CS.c
+++ b/src/H5CS.c
@@ -167,10 +167,10 @@ herr_t
H5CS_print(FILE *stream)
{
H5CS_t *fstack = H5CS_get_my_stack (); /* Get the correct function stack */
-
+
/* Don't push this function on the function stack... :-) */
FUNC_ENTER_NOAPI_NOFUNC_NOFS(H5CS_print);
-
+
/* Sanity check */
assert(fstack);
@@ -270,7 +270,7 @@ H5CS_copy_stack(H5CS_t *new_stack)
{
H5CS_t *old_stack = H5CS_get_my_stack ();
unsigned u; /* Local index variable */
-
+
/* Don't push this function on the function stack... :-) */
FUNC_ENTER_NOAPI_NOFUNC_NOFS(H5CS_copy_stack);
@@ -304,7 +304,7 @@ herr_t
H5CS_close_stack(H5CS_t *stack)
{
unsigned u; /* Local index variable */
-
+
/* Don't push this function on the function stack... :-) */
FUNC_ENTER_NOAPI_NOFUNC_NOFS(H5CS_close_stack);
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index 12fcfe5..16c27de 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -79,7 +79,7 @@
*
* JRM - 7/19/04
*
- * The TBBT has since been replaced with a skip list. This change
+ * The TBBT has since been replaced with a skip list. This change
* greatly predates this note.
*
* JRM - 9/26/05
@@ -87,7 +87,7 @@
* magic: Unsigned 32 bit integer always set to H5C__H5C_T_MAGIC. This
* field is used to validate pointers to instances of H5C_t.
*
- * aux_ptr: Pointer to void used to allow wrapper code to associate
+ * aux_ptr: Pointer to void used to allow wrapper code to associate
* its data with an instance of H5C_t. The H5C cache code
* sets this field to NULL, and otherwise leaves it alone.
*
@@ -142,10 +142,10 @@
* the cache uses the following write_permitted field to
* determine whether writes are permitted.
*
- * write_permitted: If check_write_permitted is NULL, this boolean flag
+ * write_permitted: If check_write_permitted is NULL, this boolean flag
* indicates whether writes are permitted.
*
- * log_flush: If provided, this function is called whenever a dirty
+ * log_flush: If provided, this function is called whenever a dirty
* entry is flushed to disk.
*
*
@@ -187,7 +187,7 @@
* on how frequently the cache is flushed. We will see how it goes.
*
* For now at least, I will not remove dirty entries from the list as they
- * are flushed. (this has been changed -- dirty entries are now removed from
+ * are flushed. (this has been changed -- dirty entries are now removed from
* the skip list as they are flushed. JRM - 10/25/05)
*
* slist_len: Number of entries currently in the skip list
@@ -235,22 +235,22 @@
*
* For very frequently used entries, the protect/unprotect overhead can
* become burdensome. To avoid this overhead, I have modified the cache
- * to allow entries to be "pinned". A pinned entry is similar to a
+ * to allow entries to be "pinned". A pinned entry is similar to a
* protected entry, in the sense that it cannot be evicted, and that
* the entry can be modified at any time.
*
* Pinning an entry has the following implications:
*
* 1) A pinned entry cannot be evicted. Thus unprotected
- * pinned entries reside in the pinned entry list, instead
+ * pinned entries reside in the pinned entry list, instead
* of the LRU list(s) (or other lists maintained by the current
* replacement policy code).
- *
+ *
* 2) A pinned entry can be accessed or modified at any time.
* Therefore, the cache must check with the entry owner
* before flushing it. If permission is denied, the
* cache just skips the entry in the flush.
- *
+ *
* 3) A pinned entry can be marked as dirty (and possibly
* change size) while it is unprotected.
*
@@ -263,20 +263,20 @@
*
* Maintaining the pinned entry list requires the following fields:
*
- * pel_len: Number of entries currently residing on the pinned
+ * pel_len: Number of entries currently residing on the pinned
* entry list.
*
* pel_size: Number of bytes of cache entries currently residing on
* the pinned entry list.
*
* pel_head_ptr: Pointer to the head of the doubly linked list of pinned
- * but not protected entries. Note that cache entries on
+ * but not protected entries. Note that cache entries on
* this list are linked by their next and prev fields.
*
* This field is NULL if the list is empty.
*
* pel_tail_ptr: Pointer to the tail of the doubly linked list of pinned
- * but not protected entries. Note that cache entries on
+ * but not protected entries. Note that cache entries on
* this list are linked by their next and prev fields.
*
* This field is NULL if the list is empty.
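Taken together, the four fields above amount to a conventional doubly linked list plus its running totals; a bare-bones sketch of how they might sit in a cache structure (the names mirror the comment, but the layout is illustrative rather than H5C_t's actual definition):

#include <stddef.h>

struct cache_entry;                        /* entries link through next/prev fields */

struct cache_sketch {
    size_t              pel_len;           /* entries on the pinned entry list      */
    size_t              pel_size;          /* total bytes of those entries          */
    struct cache_entry *pel_head_ptr;      /* NULL when the list is empty           */
    struct cache_entry *pel_tail_ptr;      /* NULL when the list is empty           */
};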
@@ -568,24 +568,24 @@
* id equal to the array index has been marked dirty while pinned
* in the current epoch.
*
- * pinned_flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
- * cells are used to record the number of times an entry
- * with type id equal to the array index has been flushed while
+ * pinned_flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
+ * cells are used to record the number of times an entry
+ * with type id equal to the array index has been flushed while
* pinned in the current epoch.
*
- * pinned_cleared: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
- * cells are used to record the number of times an entry
- * with type id equal to the array index has been cleared while
+ * pinned_cleared: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
+ * cells are used to record the number of times an entry
+ * with type id equal to the array index has been cleared while
* pinned in the current epoch.
*
*
- * size_increases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
- * The cells are used to record the number of times an entry
+ * size_increases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * The cells are used to record the number of times an entry
* with type id equal to the array index has increased in
* size in the current epoch.
*
- * size_decreases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
- * The cells are used to record the number of times an entry
+ * size_decreases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * The cells are used to record the number of times an entry
* with type id equal to the array index has decreased in
* size in the current epoch.
*
@@ -662,8 +662,8 @@
* the cache in the current epoch.
*
* max_pins: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the maximum number of times that any single
- * entry with type id equal to the array index that has been
+ * are used to record the maximum number of times that any single
+ * entry with type id equal to the array index that has been
* marked as pinned in the cache in the current epoch.
*
*
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index bdcf501..fd54d69 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -187,8 +187,8 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
*
* In typical application, this structure is the first field in a
* structure to be cached. For historical reasons, the external module
- * is responsible for managing the is_dirty field (this is no longer
- * completely true. See the comment on the is_dirty field for details).
+ * is responsible for managing the is_dirty field (this is no longer
+ * completely true. See the comment on the is_dirty field for details).
* All other fields are managed by the cache.
*
* The fields of this structure are discussed individually below:
@@ -230,23 +230,23 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
*
* Update: Management of the is_dirty field has been largely
* moved into the cache. The only remaining exceptions
- * are the flush and clear functions supplied by the
- * modules using the cache. These still clear the
+ * are the flush and clear functions supplied by the
+ * modules using the cache. These still clear the
* is_dirty field as before. -- JRM 7/5/05
*
* dirtied: Boolean flag used to indicate that the entry has been
* dirtied while protected.
*
* This field is set to FALSE in the protect call, and may
- * be set to TRUE by the
+ * be set to TRUE by the
* H5C_mark_pinned_or_protected_entry_dirty()
 *		call at any time prior to the unprotect call.
*
- * The H5C_mark_pinned_or_protected_entry_dirty() call exists
+ * The H5C_mark_pinned_or_protected_entry_dirty() call exists
* as a convenience function for the fractal heap code which
* may not know if an entry is protected or pinned, but knows
- * that is either protected or pinned. The dirtied field was
- * added as in the parallel case, it is necessary to know
+ * that is either protected or pinned. The dirtied field was
+ * added as in the parallel case, it is necessary to know
* whether a protected entry was dirty prior to the protect call.
*
* is_protected: Boolean flag indicating whether this entry is protected
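A rough usage outline of the dirtied-while-protected path described above is given below. The helper names are stand-ins for this sketch; the real H5C_protect and H5C_unprotect signatures are not reproduced here.

extern void *cache_protect(void *cache, unsigned long addr);
extern void  cache_mark_protected_entry_dirty(void *cache, void *thing);
extern void  cache_unprotect(void *cache, unsigned long addr, void *thing);
extern void  modify_entry(void *thing);

static void
update_entry(void *cache, unsigned long addr)
{
    void *thing = cache_protect(cache, addr);        /* entry is now protected    */

    modify_entry(thing);                             /* change it while protected */
    cache_mark_protected_entry_dirty(cache, thing);  /* records the dirtied state */

    /* At unprotect time, a set dirtied field has the same effect as passing
     * the explicit dirtied flag to the unprotect call.
     */
    cache_unprotect(cache, addr, thing);
}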
@@ -261,7 +261,7 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
* is_pinned: Boolean flag indicating whether the entry has been pinned
* in the cache.
*
- * For very hot entries, the protect / unprotect overhead
+ * For very hot entries, the protect / unprotect overhead
* can become excessive. Thus the cache has been extended
* to allow an entry to be "pinned" in the cache.
*
@@ -274,14 +274,14 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
*
* 2) A pinned entry can be accessed or modified at any time.
* Therefore, the cache must check with the entry owner
- * before flushing it. If permission is denied, the
+ * before flushing it. If permission is denied, the
* cache does not flush the entry.
*
- * 3) A pinned entry can be marked as dirty (and possibly
+ * 3) A pinned entry can be marked as dirty (and possibly
* change size) while it is unprotected.
*
- * 4) The flush-destroy code must allow pinned entries to
- * be unpinned (and possibly unprotected) during the
+ * 4) The flush-destroy code must allow pinned entries to
+ * be unpinned (and possibly unprotected) during the
* flush.
*
* JRM -- 3/16/06
@@ -301,13 +301,13 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
*
* clear_on_unprotect: Boolean flag used only in PHDF5. When H5C is used
 *		to implement the metadata cache in the parallel case, only
- * the cache with mpi rank 0 is allowed to actually write to
+ * the cache with mpi rank 0 is allowed to actually write to
* file -- all other caches must retain dirty entries until they
- * are advised that the entry is clean.
+ * are advised that the entry is clean.
*
- * This flag is used in the case that such an advisory is
+ * This flag is used in the case that such an advisory is
* received when the entry is protected. If it is set when an
- * entry is unprotected, and the dirtied flag is not set in
+ * entry is unprotected, and the dirtied flag is not set in
* the unprotect, the entry's is_dirty flag is reset by flushing
* it with the H5C__FLUSH_CLEAR_ONLY_FLAG.
*
@@ -406,7 +406,7 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
* been flushed to file in its life time.
*
* pins: int32_t containing the number of times this cache entry has
- * been pinned in cache in its life time.
+ * been pinned in cache in its life time.
*
****************************************************************************/
@@ -416,7 +416,7 @@ typedef struct H5C_cache_entry_t
size_t size;
const H5C_class_t * type;
hbool_t is_dirty;
- hbool_t dirtied;
+ hbool_t dirtied;
hbool_t is_protected;
hbool_t is_pinned;
hbool_t in_slist;
diff --git a/src/H5D.c b/src/H5D.c
index 5c4591d..0fc4168 100644
--- a/src/H5D.c
+++ b/src/H5D.c
@@ -196,7 +196,7 @@ H5D_init_interface(void)
size_t def_hyp_vec_size = H5D_XFER_HYPER_VECTOR_SIZE_DEF;
#ifdef H5_HAVE_PARALLEL
H5FD_mpio_xfer_t def_io_xfer_mode = H5D_XFER_IO_XFER_MODE_DEF;
- H5FD_mpio_chunk_opt_t def_mpio_chunk_opt_mode = H5D_XFER_MPIO_CHUNK_OPT_HARD_DEF;
+ H5FD_mpio_chunk_opt_t def_mpio_chunk_opt_mode = H5D_XFER_MPIO_CHUNK_OPT_HARD_DEF;
unsigned def_mpio_chunk_opt_num = H5D_XFER_MPIO_CHUNK_OPT_NUM_DEF;
unsigned def_mpio_chunk_opt_ratio = H5D_XFER_MPIO_CHUNK_OPT_RATIO_DEF;
#endif /* H5_HAVE_PARALLEL */
diff --git a/src/H5Dcompact.c b/src/H5Dcompact.c
index aaa04ea..ae4e473 100644
--- a/src/H5Dcompact.c
+++ b/src/H5Dcompact.c
@@ -155,7 +155,7 @@ done:
* Return: Non-negative on success, negative on failure.
*
* Programmer: Peter Cao
- * December 11, 2005
+ * December 11, 2005
*
*-------------------------------------------------------------------------
*/
@@ -181,10 +181,10 @@ H5D_compact_copy(H5F_t *f_src, H5O_layout_t *layout_src,
HDassert(layout_dst && H5D_COMPACT == layout_dst->type);
/* If there's a source datatype, set up type conversion information */
- if (!dt_src)
+ if (!dt_src)
/* Type conversion not necessary */
HDmemcpy(layout_dst->u.compact.buf, layout_src->u.compact.buf, layout_src->u.compact.size);
- else {
+ else {
H5T_path_t *tpath_src_mem, *tpath_mem_dst; /* Datatype conversion paths */
H5T_t *dt_dst; /* Destination datatype */
H5T_t *dt_mem; /* Memory datatype */
diff --git a/src/H5Dcontig.c b/src/H5Dcontig.c
index 09af474..285f121 100644
--- a/src/H5Dcontig.c
+++ b/src/H5Dcontig.c
@@ -988,7 +988,7 @@ done:
* Return: Non-negative on success, negative on failure.
*
* Programmer: Quincey Koziol
- * Monday, November 21, 2005
+ * Monday, November 21, 2005
*
* Modifier: Peter Cao
* Saturday, January 07, 2006
@@ -996,7 +996,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5D_contig_copy(H5F_t *f_src, H5O_layout_t *layout_src,
+H5D_contig_copy(H5F_t *f_src, H5O_layout_t *layout_src,
H5F_t *f_dst, H5O_layout_t *layout_dst, H5T_t *dt_src, hid_t dxpl_id)
{
haddr_t addr_src; /* File offset in source dataset */
diff --git a/src/H5Dio.c b/src/H5Dio.c
index dec068e..05b428e 100644
--- a/src/H5Dio.c
+++ b/src/H5Dio.c
@@ -1249,7 +1249,7 @@ H5D_contig_write(H5D_io_info_t *io_info, hsize_t nelmts,
if(H5D_contig_collective_io(io_info,file_space,mem_space,buf,TRUE)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "contiguous write failed in collective mode");
}
- else
+ else
#endif
{
if((io_info->ops.write)(io_info,
@@ -1514,7 +1514,7 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
if(H5D_chunk_collective_io(io_info,&fm,buf,FALSE)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunked read failed in collective mode");
}
-
+
else {/* sequential or independent read */
#endif
/* Get first node in chunk skip list */
@@ -1830,7 +1830,7 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
#ifdef H5_HAVE_PARALLEL
/* Check whether the collective mode can be turned off globally*/
-
+
if(io_info->dxpl_cache->xfer_mode == H5FD_MPIO_COLLECTIVE) {
if(H5D_mpio_chunk_adjust_iomode(io_info,&fm))
HGOTO_ERROR(H5E_DATASET,H5E_CANTGET,FAIL,"can't adjust collective I/O")
@@ -1840,7 +1840,7 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunked write failed in collective mode");
}
else {/* sequential or independent write */
-
+
#endif /* H5_HAVE_PARALLEL */
/* Get first node in chunk skip list */
chunk_node=H5SL_first(fm.fsel);
@@ -2188,14 +2188,14 @@ H5D_create_chunk_map(const H5D_t *dataset, const H5T_t *mem_type, const H5S_t *f
/* calculate total chunk in file map*/
fm->select_chunk = NULL;
fm->total_chunks = 1;
- for(u=0; u<fm->f_ndims; u++)
+ for(u=0; u<fm->f_ndims; u++)
fm->total_chunks= fm->total_chunks*fm->chunks[u];
- if(IS_H5FD_MPI(dataset->oloc.file)) {
+ if(IS_H5FD_MPI(dataset->oloc.file)) {
H5_CHECK_OVERFLOW(fm->total_chunks, hsize_t, size_t);
if(NULL == (fm->select_chunk = (hbool_t *) H5MM_calloc((size_t)fm->total_chunks * sizeof(hbool_t))))
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate chunk info")
}
-
+
@@ -2219,9 +2219,9 @@ H5D_create_chunk_map(const H5D_t *dataset, const H5T_t *mem_type, const H5S_t *f
HGOTO_ERROR (H5E_DATASET, H5E_BADSELECT, FAIL, "unable to convert from file to memory data space")
/* If the selection is NONE or POINTS, set the flag to FALSE */
- if(fsel_type == H5S_SEL_POINTS || fsel_type == H5S_SEL_NONE)
+ if(fsel_type == H5S_SEL_POINTS || fsel_type == H5S_SEL_NONE)
sel_hyper_flag = FALSE;
- else
+ else
sel_hyper_flag = TRUE;
/* Check if file selection is a point selection */
if(!sel_hyper_flag) {
@@ -2487,7 +2487,7 @@ H5D_create_chunk_file_map_hyper(fm_map *fm,const H5D_t *dset)
end[u]=(coords[u]+fm->chunk_dim[u])-1;
} /* end for */
-
+
/* Calculate the index of this chunk */
if(H5V_chunk_index(fm->f_ndims,coords,fm->layout->u.chunk.dim,fm->down_chunks,&chunk_index)<0)
HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
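The chunk index computed here is a row-major linearization of the per-dimension chunk coordinates. A self-contained sketch of the arithmetic follows; it is not the H5V_chunk_index signature, just the underlying formula.

#include <stddef.h>

/* coords[d]    : element coordinate in dimension d
 * chunk_dim[d] : chunk edge length in dimension d
 * nchunks[d]   : number of chunks along dimension d
 */
static size_t
chunk_index(unsigned ndims, const size_t *coords,
            const size_t *chunk_dim, const size_t *nchunks)
{
    size_t   idx = 0;
    unsigned d;

    for (d = 0; d < ndims; d++)
        idx = idx * nchunks[d] + coords[d] / chunk_dim[d];

    return idx;
}

For a 100x100 dataset split into 10x10 chunks, the element at (57, 23) lies in chunk (5, 2), giving index 5*10 + 2 = 52.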
diff --git a/src/H5Distore.c b/src/H5Distore.c
index 2978f72..b479aa0 100644
--- a/src/H5Distore.c
+++ b/src/H5Distore.c
@@ -196,7 +196,7 @@ typedef struct H5D_istore_it_ud4_t {
haddr_t addr_dst; /* Address of dest. B-tree */
void *buf; /* Buffer to hold chunk data for read/write */
size_t buf_size; /* Buffer size */
-
+
/* needed for converting variable-length data */
hid_t tid_src; /* Datatype ID for source datatype */
hid_t tid_dst; /* Datatype ID for destination datatype */
@@ -911,12 +911,12 @@ H5D_istore_iter_chunkmap (H5F_t UNUSED *f, hid_t UNUSED dxpl_id, const void *_lt
int ret_value = H5B_ITER_CONT; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_istore_iter_chunkmap);
-
+
rank = udata->common.mesg->u.chunk.ndims - 1;
-
+
if(H5V_chunk_index(rank,lt_key->offset,udata->common.mesg->u.chunk.dim,udata->down_chunks,&chunk_index)<0)
HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
-
+
udata->chunk_addr[chunk_index] = addr;
done:
@@ -969,15 +969,15 @@ H5D_istore_iter_dump (H5F_t UNUSED *f, hid_t UNUSED dxpl_id, const void *_lt_key
/*-------------------------------------------------------------------------
- * Function: H5D_istore_iter_copy
+ * Function: H5D_istore_iter_copy
*
- * Purpose: copy chunked raw data from source file and insert to the
+ * Purpose: copy chunked raw data from source file and insert to the
* B-tree node in the destination file
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Peter Cao
- * August 20, 2005
+ * Programmer: Peter Cao
+ * August 20, 2005
*
*-------------------------------------------------------------------------
*/
@@ -2446,8 +2446,8 @@ done:
/*-------------------------------------------------------------------------
* Function: H5D_istore_chunkmap
*
- * Purpose: obtain the chunk address and corresponding chunk index
- *
+ * Purpose: obtain the chunk address and corresponding chunk index
+ *
* Return: Success: Non-negative on succeed.
*
* Failure: negative value
@@ -3510,13 +3510,13 @@ done:
* Return: Non-negative on success (with the ISTORE argument initialized
* and ready to write to an object header). Negative on failure.
*
- * Programmer: Peter Cao
- * August 20, 2005
+ * Programmer: Peter Cao
+ * August 20, 2005
*
*-------------------------------------------------------------------------
*/
herr_t
-H5D_istore_copy(H5F_t *f_src, H5O_layout_t *layout_src, H5F_t *f_dst,
+H5D_istore_copy(H5F_t *f_src, H5O_layout_t *layout_src, H5F_t *f_dst,
H5O_layout_t *layout_dst, H5T_t *dt_src, H5O_pline_t *pline, hid_t dxpl_id)
{
H5D_istore_it_ud4_t udata;
diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index 7ad8e50..ead1777 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -104,29 +104,29 @@ typedef struct H5D_common_coll_info_t {
size_t mpi_buf_count;
haddr_t chunk_addr;
} H5D_common_coll_info_t;
-
+
/********************/
/* Local Prototypes */
/********************/
-static herr_t
-H5D_multi_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf,
+static herr_t
+H5D_multi_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf,
hbool_t do_write);
static herr_t
-H5D_link_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf,
+H5D_link_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf,
hbool_t do_write,int sum_chunk);
-static herr_t
+static herr_t
H5D_inter_collective_io(H5D_io_info_t *io_info,const H5S_t *file_space,
- const H5S_t *mem_space,haddr_t addr,
+ const H5S_t *mem_space,haddr_t addr,
const void *buf, hbool_t do_write );
-static herr_t
+static herr_t
H5D_final_collective_io(H5D_io_info_t *io_info,MPI_Datatype*mpi_file_type,
MPI_Datatype *mpi_buf_type,
- H5D_common_coll_info_t* coll_info,
+ H5D_common_coll_info_t* coll_info,
const void *buf, hbool_t do_write);
#ifdef OLD_WAY
static herr_t
@@ -134,14 +134,14 @@ H5D_pre_sort_chunk(H5D_io_info_t *io_info,int total_chunks,
haddr_t total_chunk_addr_array[]);
#endif
-static herr_t
+static herr_t
H5D_sort_chunk(H5D_io_info_t * io_info,
fm_map *fm,
H5D_chunk_addr_info_t chunk_addr_info_array[],
int many_chunk_opt);
-static herr_t
-H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
+static herr_t
+H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
fm_map *fm,
uint8_t assign_io_mode[],
haddr_t chunk_addr[]);
@@ -151,7 +151,7 @@ static herr_t H5D_ioinfo_make_coll(H5D_io_info_t *io_info);
static herr_t H5D_mpio_get_min_chunk(const H5D_io_info_t *io_info,
const fm_map *fm, int *min_chunkf);
static int H5D_cmp_chunk_addr(const void *addr1, const void *addr2);
-static herr_t
+static herr_t
H5D_mpio_get_sum_chunk(const H5D_io_info_t *io_info,
const fm_map *fm, int *sum_chunkf);
@@ -284,10 +284,10 @@ done:
 * Description: If H5_MPI_SPECIAL_COLLECTIVE_IO_WORKS is not defined,
collective IO with no contribution from one or more
                  processes is not assured. We will check the minimum
- number of chunks the process is used. If the number is
+ number of chunks the process is used. If the number is
zero, we will use independent IO mode instead.
This is necessary with Linked chunk IO.
- * Purpose: Checks if it is possible to do collective IO
+ * Purpose: Checks if it is possible to do collective IO
*
* Return: Success: Non-negative: TRUE or FALSE
* Failure: Negative
@@ -307,8 +307,8 @@ H5D_mpio_chunk_adjust_iomode(H5D_io_info_t *io_info, const fm_map *fm) {
#ifndef H5_MPI_SPECIAL_COLLECTIVE_IO_WORKS
if(H5D_mpio_get_min_chunk(io_info,fm,&min_chunk)<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSWAP, FAIL, "unable to obtain the min chunk number of all processes");
- if(min_chunk == 0) {
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSWAP, FAIL, "unable to obtain the min chunk number of all processes");
+ if(min_chunk == 0) {
         H5P_genplist_t *dx_plist;           /* Data transfer property list */
/* Get the dataset transfer property list */
@@ -347,11 +347,11 @@ done:
*/
herr_t
H5D_mpio_select_read(H5D_io_info_t *io_info,
- size_t mpi_buf_count,
+ size_t mpi_buf_count,
const size_t UNUSED elmt_size,
- const H5S_t UNUSED *file_space,
+ const H5S_t UNUSED *file_space,
const H5S_t UNUSED *mem_space,
- haddr_t addr,
+ haddr_t addr,
void *buf/*out*/)
{
herr_t ret_value = SUCCEED;
@@ -378,9 +378,9 @@ done:
*/
herr_t
H5D_mpio_select_write(H5D_io_info_t *io_info,
- size_t mpi_buf_count,
+ size_t mpi_buf_count,
const size_t UNUSED elmt_size,
- const H5S_t UNUSED *file_space,
+ const H5S_t UNUSED *file_space,
const H5S_t UNUSED *mem_space,
haddr_t addr,
const void *buf)
@@ -569,8 +569,8 @@ done:
* Function: H5D_contig_collective_io
*
* Purpose: Wrapper Routine for H5D_inter_collective_io
- The starting address of contiguous storage is passed
- *
+ The starting address of contiguous storage is passed
+ *
*
* Return: Non-negative on success/Negative on failure
*
@@ -581,11 +581,11 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5D_contig_collective_io(H5D_io_info_t *io_info,
+H5D_contig_collective_io(H5D_io_info_t *io_info,
const H5S_t *file_space,
const H5S_t *mem_space,
const void *buf,
- hbool_t do_write)
+ hbool_t do_write)
{
@@ -607,8 +607,8 @@ H5D_contig_collective_io(H5D_io_info_t *io_info,
#endif
if(H5D_inter_collective_io(io_info,file_space,mem_space,addr,buf,do_write)<0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL,"couldn't finish shared collective MPI-IO");
-
- done:
+
+ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_contig_collective_io */
@@ -616,10 +616,10 @@ H5D_contig_collective_io(H5D_io_info_t *io_info,
/*-------------------------------------------------------------------------
* Function: H5D_chunk_collective_io
*
- * Purpose: Routine for
- 1) choose an IO option:
+ * Purpose: Routine for
+ 1) choose an IO option:
a) One collective IO defined by one MPI derived datatype to link through all chunks
- or b) multiple chunk IOs,to do MPI-IO for each chunk, the IO mode may be adjusted
+ or b) multiple chunk IOs,to do MPI-IO for each chunk, the IO mode may be adjusted
due to the selection pattern for each chunk.
* For option a)
1. Sort the chunk address, obtain chunk info according to the sorted chunk address
@@ -633,7 +633,7 @@ H5D_contig_collective_io(H5D_io_info_t *io_info,
2. Depending on whether the IO mode is collective or independent or none,
Create either MPI derived datatype for each chunk to do collective IO or just do independent IO
3. Set up collective IO property list for collective mode
- 4. DO IO
+ 4. DO IO
*
* Return: Non-negative on success/Negative on failure
*
@@ -643,44 +643,44 @@ H5D_contig_collective_io(H5D_io_info_t *io_info,
*
*-------------------------------------------------------------------------
*/
-herr_t
-H5D_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf, hbool_t do_write)
+herr_t
+H5D_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf, hbool_t do_write)
{
int io_option = H5D_MULTI_CHUNK_IO_MORE_OPT;
int sum_chunk = 0,mpi_size;
unsigned one_link_chunk_io_threshold;
- H5P_genplist_t *plist;
+ H5P_genplist_t *plist;
H5FD_mpio_chunk_opt_t chunk_opt_mode;
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
htri_t check_prop,temp_not_link_io = FALSE;
int prop_value,new_value;
#endif
- herr_t ret_value = SUCCEED;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_collective_io)
assert (IS_H5FD_MPIO(io_info->dset->oloc.file));
-
+
/* Obtain the data transfer properties */
if(NULL == (plist = H5I_object(io_info->dxpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file access property list")
-
+
/* Check the optional property list on what to do with collective chunk IO. */
chunk_opt_mode=(H5FD_mpio_chunk_opt_t)H5P_peek_unsigned(plist,H5D_XFER_MPIO_CHUNK_OPT_HARD_NAME);
#ifdef KENT
printf("chunk_opt_mode = %d\n",chunk_opt_mode);
#endif
-
+
if(chunk_opt_mode == H5FD_MPIO_CHUNK_ONE_IO) io_option = H5D_ONE_LINK_CHUNK_IO;/*no opt*/
else if(chunk_opt_mode == H5FD_MPIO_CHUNK_MULTI_IO) io_option = H5D_MULTI_CHUNK_IO;/*no opt */
else {
- if(H5D_mpio_get_sum_chunk(io_info,fm,&sum_chunk)<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSWAP, FAIL, "unable to obtain the total chunk number of all processes");
+ if(H5D_mpio_get_sum_chunk(io_info,fm,&sum_chunk)<0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSWAP, FAIL, "unable to obtain the total chunk number of all processes");
if((mpi_size = H5F_mpi_get_size(io_info->dset->oloc.file))<0)
HGOTO_ERROR (H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi size");
-
+
if(NULL == (plist = H5I_object(io_info->dxpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file access property list")
@@ -732,8 +732,8 @@ H5D_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf, hbool
HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to get property value");
}
}
-
-
+
+
#endif
#ifndef H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS
if(io_option == H5D_ONE_LINK_CHUNK_IO ) io_option = H5D_MULTI_CHUNK_IO ;/* We can not do this with one chunk IO. */
@@ -745,9 +745,9 @@ H5D_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf, hbool
if(H5D_link_chunk_collective_io(io_info,fm,buf,do_write,sum_chunk)<0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL,"couldn't finish linked chunk MPI-IO");
}
-
+
else { /*multiple chunk IOs without opt */
-
+
if(H5D_multi_chunk_collective_io(io_info,fm,buf,do_write)<0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL,"couldn't finish multiple chunk MPI-IO");
@@ -764,7 +764,7 @@ done:
1. Sort the chunk address and chunk info
2. Build up MPI derived datatype for each chunk
3. Build up the final MPI derived datatype
- 4. Use common collective IO routine to do MPI-IO
+ 4. Use common collective IO routine to do MPI-IO
*
* Return: Non-negative on success/Negative on failure
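Steps 2 through 4 above follow a standard MPI-IO pattern: describe each process's scattered file regions with one derived datatype, install it as the file view, and issue a single collective write. A pared-down sketch with two fixed regions is shown below; the block sizes and offsets are arbitrary example values, not anything H5D_mpio computes.

#include <mpi.h>

/* Write two file regions with one collective call. */
static int
collective_write_two_blocks(MPI_File fh, void *buf)
{
    int          blocklens[2]  = { 4096, 4096 };       /* bytes per region            */
    MPI_Aint     file_disps[2] = { 0, 1048576 };       /* file offsets of the regions */
    MPI_Datatype ftype;
    MPI_Status   status;

    MPI_Type_create_hindexed(2, blocklens, file_disps, MPI_BYTE, &ftype);
    MPI_Type_commit(&ftype);

    /* The file view makes the two regions look contiguous to the write call. */
    MPI_File_set_view(fh, 0, MPI_BYTE, ftype, "native", MPI_INFO_NULL);
    MPI_File_write_at_all(fh, 0, buf, 8192, MPI_BYTE, &status);

    MPI_Type_free(&ftype);
    return 0;
}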
@@ -788,9 +788,9 @@ H5D_link_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf,
size_t mpi_buf_count;
size_t mpi_file_count;
hbool_t mbt_is_derived=0, /* Whether the buffer (memory) type is derived and needs to be free'd */
- mft_is_derived=0; /* Whether the file type is derived and needs to be free'd */
-
- int mpi_size,mpi_code; /* MPI return code */
+ mft_is_derived=0; /* Whether the file type is derived and needs to be free'd */
+
+ int mpi_size,mpi_code; /* MPI return code */
int i,num_chunk=0,total_chunks;
size_t ori_num_chunk;
@@ -818,8 +818,8 @@ H5D_link_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf,
char *bc_percent = NULL;
char *bcc_percent = NULL;
#endif
- herr_t ret_value = SUCCEED;
-
+ herr_t ret_value = SUCCEED;
+
FUNC_ENTER_NOAPI_NOINIT(H5D_link_chunk_collective_io)
ori_total_chunks = fm->total_chunks;
H5_ASSIGN_OVERFLOW(total_chunks,ori_total_chunks,hsize_t,int);
@@ -829,7 +829,7 @@ H5D_link_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf,
H5SL_node_t *chunk_node;
H5D_chunk_info_t *chunk_info;
H5D_storage_t store;
-
+
chunk_node = H5SL_first(fm->fsel);
if(chunk_node == NULL) {
if(H5D_istore_chunkmap(io_info,total_chunks,&chunk_base_addr,fm->down_chunks)<0)
@@ -846,7 +846,7 @@ H5D_link_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf,
if(HADDR_UNDEF==(chunk_base_addr = H5D_istore_get_addr(io_info,NULL)))
HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL,"couldn't get chunk info from skipped list");
-
+
#ifdef KENT
printf("before inter_collective_io for total chunk = 1 \n");
#endif
@@ -864,7 +864,7 @@ printf("before inter_collective_io for total chunk = 1 \n");
printf("total_chunks = %d\n",(int)total_chunks);
#endif
-
+
if(num_chunk == 0) total_chunk_addr_array = H5MM_malloc(sizeof(haddr_t)*total_chunks);
else
{
@@ -888,12 +888,12 @@ printf("total_chunks = %d\n",(int)total_chunks);
"bc" means 'b-tree iterately obtain all chunk addresses individually',
the default one means 'obtaining the chunk address individually',
*/
-
+
if(bcc_percent=getenv("BCC_PERCENT")){
bsearch_coll_chunk_threshold = atoi(bcc_percent);
assert((bsearch_coll_chunk_threshold >=0) &&(bsearch_coll_chunk_threshold <=100));
}
- else
+ else
bsearch_coll_chunk_threshold = H5D_ALL_CHUNK_ADDR_THRES_COL;
#else
bsearch_coll_chunk_threshold = H5D_ALL_CHUNK_ADDR_THRES_COL; /*This number may be changed according to the performance study */
@@ -902,9 +902,9 @@ printf("total_chunks = %d\n",(int)total_chunks);
if((mpi_size = H5F_mpi_get_size(io_info->dset->oloc.file))<0)
HGOTO_ERROR (H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi size");
- /* Calculate the actual threshold to obtain all chunk addresses collectively
+ /* Calculate the actual threshold to obtain all chunk addresses collectively
       The bigger this number is, the more likely it is that chunk addresses will be obtained collectively. */
- /* For non-optimization one-link IO,
+ /* For non-optimization one-link IO,
actual bsearch threshold is always 0,
we would always want to obtain the chunk addresses individually
for each process. */
@@ -921,7 +921,7 @@ printf("total_chunks = %d\n",(int)total_chunks);
bsearch_chunk_ratio = atoi(bc_percent);
assert((bsearch_chunk_ratio<=100)&&(bsearch_chunk_ratio>=0));
}
- else
+ else
bsearch_chunk_ratio = H5D_ALL_CHUNK_ADDR_THRES_IND;
#else
bsearch_chunk_ratio = H5D_ALL_CHUNK_ADDR_THRES_IND; /*This number may be changed according to the performance study */
@@ -931,20 +931,20 @@ printf("total_chunks = %d\n",(int)total_chunks);
The unit of the threshold is the number of chunks. The value should be at least 1.
It can be calculated as follows:
- if(total_chunks*bsearch_chunk_ratio/100 <=1)
+ if(total_chunks*bsearch_chunk_ratio/100 <=1)
                     bsearch_chunk_threshold = 1;
- else
+ else
bsearch_chunk_threshold = total_chunks*bsearch_chunk_ratio/100;
-                 In order to make the calculation more efficient,
+                 In order to make the calculation more efficient,
we use the following approximate formula to calculate the threshold.
bsearch_chunk_threshold = 1+ (total_chunks*bsearch_chunk_ratio-99)/100;
                 The only difference is when total_chunks*bsearch_chunk_ratio == 100n+99;
- the approximate formula will give value (n+1) instead of n for threshold.
+ the approximate formula will give value (n+1) instead of n for threshold.
                 That shouldn't matter much from our perspective.
- */
-
+ */
+
bsearch_chunk_threshold = 1 +(total_chunks*bsearch_chunk_ratio-99)/100;
if(num_chunk > bsearch_chunk_threshold) many_chunk_opt = H5D_OBTAIN_ALL_CHUNK_ADDR_IND;
if((sum_chunk == 0) && (total_chunks >= H5D_ALL_CHUNK_ADDR_THRES_IND_NUM))
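The approximation above can be checked numerically. A small standalone program (not part of the library), where x stands for total_chunks*bsearch_chunk_ratio, compares the exact rule with the approximate formula and reports only the 100n+99 cases mentioned in the comment:

    #include <stdio.h>

    int main(void)
    {
        int x;    /* x plays the role of total_chunks * bsearch_chunk_ratio */

        for(x = 1; x <= 500; x++) {
            int exact  = (x / 100 <= 1) ? 1 : x / 100;    /* rule spelled out in the comment */
            int approx = 1 + (x - 99) / 100;              /* approximation used by the code  */

            if(exact != approx)
                printf("x = %d: exact = %d, approx = %d\n", x, exact, approx);
        }
        return 0;    /* prints only x = 199, 299, 399, 499 */
    }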
@@ -953,14 +953,14 @@ printf("total_chunks = %d\n",(int)total_chunks);
#ifdef KENT
printf("before sorting the chunk address \n");
#endif
- /* Sort the chunk address
+ /* Sort the chunk address
when chunk optimization selection is either H5D_OBTAIN_*/
if(num_chunk == 0){ /* special case: this process doesn't select anything */
if(H5D_istore_chunkmap(io_info,total_chunks,total_chunk_addr_array,fm->down_chunks)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address");
chunk_base_addr = total_chunk_addr_array[0];
}
-
+
else {
if(H5D_sort_chunk(io_info,fm,chunk_addr_info_array,many_chunk_opt)<0)
HGOTO_ERROR (H5E_DATASPACE, H5E_CANTSWAP, FAIL, "unable to sort chunk address");
@@ -969,8 +969,8 @@ printf("before sorting the chunk address \n");
#ifdef KENT
printf("after sorting the chunk address \n");
#endif
-
- /* Obtain MPI derived datatype from all individual chunks */
+
+ /* Obtain MPI derived datatype from all individual chunks */
for ( i = 0; i < num_chunk; i++) {
/* Disk MPI derived datatype */
if(H5S_mpio_space_type(chunk_addr_info_array[i].chunk_info.fspace,src_type_size,&chunk_ftype[i],
@@ -981,7 +981,7 @@ printf("after sorting the chunk address \n");
if(H5S_mpio_space_type(chunk_addr_info_array[i].chunk_info.mspace,dst_type_size,&chunk_mtype[i],
&mpi_buf_count,&mpi_buf_extra_offset,&mbt_is_derived)<0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't create MPI buf type");
-
+
/* Chunk address relative to the first chunk */
chunk_addr_info_array[i].chunk_addr -= chunk_base_addr;
H5_ASSIGN_OVERFLOW(chunk_disp_array[i],chunk_addr_info_array[i].chunk_addr,haddr_t,MPI_Aint);
@@ -989,7 +989,7 @@ printf("after sorting the chunk address \n");
blocklen_value = 1;
if(num_chunk){
-
+
/* initialize the buffer with the constant value 1 */
H5V_array_fill(blocklen,&blocklen_value,sizeof(int),(size_t)num_chunk);
@@ -1022,7 +1022,7 @@ printf("after sorting the chunk address \n");
else {/* no selection at all for this process */
chunk_final_ftype = MPI_BYTE;
chunk_final_mtype = MPI_BYTE;
-
+
/* buffer, file derived datatypes should be true */
coll_info.mbt_is_derived = 0;
coll_info.mft_is_derived = 0;
@@ -1032,7 +1032,7 @@ printf("after sorting the chunk address \n");
#ifdef KENT
printf("before coming to final collective IO\n");
#endif
-
+
if(H5D_final_collective_io(io_info,&chunk_final_ftype,&chunk_final_mtype,&coll_info,buf,do_write)<0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL,"couldn't finish MPI-IO");
@@ -1042,7 +1042,7 @@ printf("before freeing memory inside H5D_link_collective_io ret_value = %d\n",r
#endif
-
+
if (fm->total_chunks != 1) {
if(num_chunk == 0) HDfree(total_chunk_addr_array);
else {
@@ -1072,7 +1072,7 @@ printf("before leaving H5D_link_collective_io ret_value = %d\n",ret_value);
          1. Use MPI_Gather and MPI_Bcast to obtain the IO mode for each chunk (collective/independent/none)
2. Depending on whether the IO mode is collective or independent or none,
Create either MPI derived datatype for each chunk or just do independent IO
- 3. Use common collective IO routine to do MPI-IO
+ 3. Use common collective IO routine to do MPI-IO
*
* Return: Non-negative on success/Negative on failure
*
@@ -1082,8 +1082,8 @@ printf("before leaving H5D_link_collective_io ret_value = %d\n",ret_value);
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5D_multi_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf, hbool_t do_write)
+static herr_t
+H5D_multi_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf, hbool_t do_write)
{
int i,total_chunk;
@@ -1096,7 +1096,7 @@ H5D_multi_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf,
H5D_storage_t store; /* union of EFL and chunk pointer in file space */
hbool_t select_chunk;
hbool_t last_io_mode_coll = TRUE;
- herr_t ret_value = SUCCEED;
+ herr_t ret_value = SUCCEED;
#ifdef KENT
int mpi_rank;
#endif
@@ -1117,7 +1117,7 @@ H5D_multi_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf,
#endif
/* obtain IO option for each chunk */
- if(H5D_obtain_mpio_mode(io_info,fm,chunk_io_option,chunk_addr)<0)
+ if(H5D_obtain_mpio_mode(io_info,fm,chunk_io_option,chunk_addr)<0)
HGOTO_ERROR (H5E_DATASET, H5E_CANTRECV, FAIL, "unable to obtain MPIO mode");
for( i = 0; i<total_chunk;i++){
@@ -1140,7 +1140,7 @@ printf("mpi_rank = %d, chunk index = %d\n",mpi_rank,i);
if(NULL ==(chunk_node = H5SL_first(fm->fsel)))
HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL,"couldn't get chunk node from skipped list");
#else
-
+
if(NULL ==(chunk_node = H5SL_first(fm->fsel)))
HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL,"couldn't get chunk node from skipped list");
while(chunk_node){
@@ -1161,30 +1161,30 @@ printf("mpi_rank = %d, chunk index = %d\n",mpi_rank,i);
#endif
}
- if(chunk_io_option[i] == 1){ /*collective IO for this chunk,
+ if(chunk_io_option[i] == 1){ /*collective IO for this chunk,
                                   note: even if there is no selection for this process,
the process still needs to contribute MPI NONE TYPE.*/
#ifdef KENT
printf("inside collective chunk IO mpi_rank = %d, chunk index = %d\n",mpi_rank,i);
#endif
-
+
if(!last_io_mode_coll)
/* Switch back to collective I/O */
if(H5D_ioinfo_make_coll(io_info) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't switch to collective I/O")
-
+
if(select_chunk){
if(H5D_inter_collective_io(io_info,chunk_info->fspace,chunk_info->mspace,
chunk_addr[i],buf,do_write )<0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL,"couldn't finish shared collective MPI-IO");
-
+
}
else{
if(H5D_inter_collective_io(io_info,NULL,NULL,
chunk_addr[i],buf,do_write )<0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL,"couldn't finish shared collective MPI-IO");
-
- }
+
+ }
last_io_mode_coll = TRUE;
}
@@ -1192,7 +1192,7 @@ printf("inside collective chunk IO mpi_rank = %d, chunk index = %d\n",mpi_rank,i
#ifdef KENT
printf("inside independent IO mpi_rank = %d, chunk index = %d\n",mpi_rank,i);
#endif
-
+
HDassert(chunk_io_option[i] == 0);
if(!select_chunk) continue; /* this process has nothing to do with this chunk, continue! */
if(last_io_mode_coll)
@@ -1206,16 +1206,16 @@ printf("inside independent IO mpi_rank = %d, chunk index = %d\n",mpi_rank,i);
chunk_info->fspace,chunk_info->mspace,0,
buf);
/* Check return value of the write */
- if (ret_value<0)
+ if (ret_value<0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "optimized write failed")
}
else {
ret_value = (io_info->ops.read)(io_info,
chunk_info->chunk_points,H5T_get_size(io_info->dset->shared->type),
chunk_info->fspace,chunk_info->mspace,0,
- buf);
+ buf);
/* Check return value from optimized write */
- if (ret_value<0)
+ if (ret_value<0)
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed")
}
@@ -1241,7 +1241,7 @@ printf("inside independent IO mpi_rank = %d, chunk index = %d\n",mpi_rank,i);
*
* Purpose: Routine for the shared part of collective IO between multiple chunk
collective IO and contiguous collective IO
-
+
*
* Return: Non-negative on success/Negative on failure
*
@@ -1251,16 +1251,16 @@ printf("inside independent IO mpi_rank = %d, chunk index = %d\n",mpi_rank,i);
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5D_inter_collective_io(H5D_io_info_t *io_info,const H5S_t *file_space,const H5S_t *mem_space,
- haddr_t addr, const void *buf, hbool_t do_write )
+ haddr_t addr, const void *buf, hbool_t do_write )
{
size_t mpi_buf_count, mpi_file_count; /* Number of "objects" to transfer */
MPI_Datatype mpi_file_type,mpi_buf_type;
hsize_t mpi_buf_offset, mpi_file_offset; /* Offset within dataset where selection (ie. MPI type) begins */
hbool_t mbt_is_derived=0, /* Whether the buffer (memory) type is derived and needs to be free'd */
- mft_is_derived=0; /* Whether the file type is derived and needs to be free'd */
+ mft_is_derived=0; /* Whether the file type is derived and needs to be free'd */
H5D_common_coll_info_t coll_info;
herr_t ret_value = SUCCEED; /* return value */
@@ -1274,11 +1274,11 @@ H5D_inter_collective_io(H5D_io_info_t *io_info,const H5S_t *file_space,const H5S
if(H5S_mpio_space_type(mem_space,H5T_get_size(io_info->dset->shared->type),
&mpi_buf_type,&mpi_buf_count,&mpi_buf_offset,&mbt_is_derived)<0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't create MPI buffer type");
-
+
}
else {
/* For non-selection, participate with a none MPI derived datatype, the count is 0. */
- mpi_buf_type = MPI_BYTE;
+ mpi_buf_type = MPI_BYTE;
mpi_file_type = MPI_BYTE;
mpi_file_count = 0;
mpi_buf_count = 0;
@@ -1305,7 +1305,7 @@ printf("before leaving inter_collective_io ret_value = %d\n",ret_value);
* Function: H5D_final_collective_io
*
* Purpose: Routine for the common part of collective IO with different storages.
-
+
*
* Return: Non-negative on success/Negative on failure
*
@@ -1315,13 +1315,13 @@ printf("before leaving inter_collective_io ret_value = %d\n",ret_value);
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5D_final_collective_io(H5D_io_info_t *io_info,MPI_Datatype*mpi_file_type,MPI_Datatype *mpi_buf_type,
- H5D_common_coll_info_t* coll_info, const void *buf, hbool_t do_write)
+ H5D_common_coll_info_t* coll_info, const void *buf, hbool_t do_write)
{
- int mpi_code; /* MPI return code */
+ int mpi_code; /* MPI return code */
hbool_t plist_is_setup=0; /* Whether the dxpl has been customized */
herr_t ret_value = SUCCEED;
@@ -1338,7 +1338,7 @@ H5D_final_collective_io(H5D_io_info_t *io_info,MPI_Datatype*mpi_file_type,MPI_Da
plist_is_setup=1;
#ifdef KENT
HDfprintf(stdout,"chunk addr %Hu\n",coll_info->chunk_addr);
- printf("mpi_buf_count %d\n",coll_info->mpi_buf_count);
+ printf("mpi_buf_count %d\n",coll_info->mpi_buf_count);
#endif
if(do_write) {
ret_value = (io_info->ops.write)(io_info,
@@ -1348,7 +1348,7 @@ H5D_final_collective_io(H5D_io_info_t *io_info,MPI_Datatype*mpi_file_type,MPI_Da
#ifdef KENT
printf("ret_value after final collective IO= %d\n",ret_value);
#endif
- if (ret_value<0)
+ if (ret_value<0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "optimized write failed")
}
else {
@@ -1356,11 +1356,11 @@ H5D_final_collective_io(H5D_io_info_t *io_info,MPI_Datatype*mpi_file_type,MPI_Da
coll_info->mpi_buf_count,0,NULL,NULL,coll_info->chunk_addr,
buf);
/* Check return value from optimized write */
- if (ret_value<0)
+ if (ret_value<0)
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed")
}
done:
-
+
/* Reset the dxpl settings */
if(plist_is_setup) {
if(H5FD_mpi_teardown_collective(io_info->dxpl_id)<0)
@@ -1371,7 +1371,7 @@ H5D_final_collective_io(H5D_io_info_t *io_info,MPI_Datatype*mpi_file_type,MPI_Da
if (coll_info->mbt_is_derived) {
if (MPI_SUCCESS != (mpi_code= MPI_Type_free( mpi_buf_type )))
HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code);
-
+
}
if (coll_info->mft_is_derived) {
if (MPI_SUCCESS != (mpi_code= MPI_Type_free( mpi_file_type )))
@@ -1392,7 +1392,7 @@ H5D_final_collective_io(H5D_io_info_t *io_info,MPI_Datatype*mpi_file_type,MPI_Da
Description:
       the root will collect all chunk addresses and broadcast them to the other processes.
-
+
Parameters:
Input: H5D_io_info_t* io_info,
@@ -1430,7 +1430,7 @@ H5D_pre_sort_chunk(H5D_io_info_t *io_info,int total_chunks,haddr_t total_chunk_a
if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&chunk_addrtype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code);
-
+
mpi_type_cleanup = 1;
if(mpi_rank == root) {
@@ -1441,7 +1441,7 @@ H5D_pre_sort_chunk(H5D_io_info_t *io_info,int total_chunks,haddr_t total_chunk_a
/* Broadcasting the MPI_IO option info. and chunk address info. */
if(MPI_SUCCESS !=(mpi_code = MPI_Bcast(total_chunk_addr_array,1,chunk_addrtype,root,comm)))
HMPI_GOTO_ERROR(FAIL, "MPI_BCast failed", mpi_code);
-
+
done:
if(mpi_type_cleanup){
@@ -1462,12 +1462,12 @@ done:
For most cases, the chunk address has already been sorted in increasing order.
The special sorting flag is used to optimize this common case.
quick sort is used for necessary sorting.
-
+
Parameters:
Input: H5D_io_info_t* io_info,
fm_map *fm(global chunk map struct)
- Input/Output: H5D_chunk_addr_info_t chunk_addr_info_array[] : array to store chunk address and information
- many_chunk_opt : flag to optimize the way to obtain chunk addresses
+ Input/Output: H5D_chunk_addr_info_t chunk_addr_info_array[] : array to store chunk address and information
+ many_chunk_opt : flag to optimize the way to obtain chunk addresses
for many chunks
*
* Return: Non-negative on success/Negative on failure
@@ -1479,7 +1479,7 @@ done:
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5D_sort_chunk(H5D_io_info_t * io_info,
fm_map *fm,
H5D_chunk_addr_info_t chunk_addr_info_array[],
@@ -1500,7 +1500,7 @@ H5D_sort_chunk(H5D_io_info_t * io_info,
H5D_storage_t store; /*union of EFL and chunk pointer in file space */
hbool_t do_sort = FALSE;
herr_t ret_value = SUCCEED; /*return value */
-
+
FUNC_ENTER_NOAPI_NOINIT(H5D_sort_chunk)
num_chunks = H5SL_count(fm->fsel);
@@ -1563,7 +1563,7 @@ printf("Coming inside H5D_OBTAIN_ALL_CHUNK_ADDR_COL\n");
if(HADDR_UNDEF==(chunk_addr = H5D_istore_get_addr(io_info,NULL)))
HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL,"couldn't get chunk info from skipped list");
}
- else
+ else
chunk_addr = total_chunk_addr_array[chunk_info->index];
chunk_addr_info_array[i].chunk_addr = chunk_addr;
chunk_addr_info_array[i].chunk_info = *chunk_info;
@@ -1574,12 +1574,12 @@ printf("Coming inside H5D_OBTAIN_ALL_CHUNK_ADDR_COL\n");
chunk_info = H5SL_item(chunk_node);
store.chunk.offset = chunk_info->coords;
store.chunk.index = chunk_info->index;
-
+
if(many_chunk_opt == H5D_OBTAIN_ONE_CHUNK_ADDR_IND){
if(HADDR_UNDEF==(chunk_addr = H5D_istore_get_addr(io_info,NULL)))
HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL,"couldn't get chunk info from skipped list");
}
- else
+ else
chunk_addr = total_chunk_addr_array[chunk_info->index];
if(chunk_addr < chunk_addr_info_array[i].chunk_addr) do_sort = TRUE;
@@ -1605,7 +1605,7 @@ done:
}
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_sort_chunk() */
-
+
/*-------------------------------------------------------------------------
* Function: H5D_obtain_mpio_mode
@@ -1616,11 +1616,11 @@ done:
Description:
       1) Each process provides two pieces of information for all chunks with selections
- a) chunk index
+ a) chunk index
          b) whether this chunk is regular (for the case where MPI derived datatypes do not work)
2) Gather all the information to the root process
-
+
3) Root process will do the following:
a) Obtain chunk address for all chunks in this data space
b) With the consideration of the user option, calculate IO mode for each chunk
@@ -1628,7 +1628,7 @@ done:
in order to do MPI Bcast only once
d) MPI Bcast the IO mode and chunk address information for each chunk.
4) Each process then retrieves IO mode and chunk address information to assign_io_mode and chunk_addr.
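      In outline, steps 1)-4) follow the usual gather/decide/broadcast pattern. A much-simplified sketch of that pattern (illustrative names and buffer layout only; the real routine also packs the chunk addresses into the same broadcast):

    #include <stdlib.h>
    #include <mpi.h>

    /* Sketch only: each rank reports which chunks it selected, the root counts
     * ranks per chunk and picks collective vs. independent IO, and the decision
     * is broadcast back to every rank. */
    static int decide_chunk_io_mode(MPI_Comm comm, int total_chunks,
                                    const unsigned char my_select[], /* 1 if this rank selects chunk ic */
                                    unsigned char io_mode[],         /* out: 1 = collective IO          */
                                    int threshold_nproc)
    {
        int            rank, size, ic, p;
        unsigned char *recv = NULL;
        const int      root = 0;

        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);
        if(rank == root)
            recv = (unsigned char *)malloc((size_t)total_chunks * (size_t)size);

        /* steps 1)-2): gather the per-chunk selection flags on the root */
        MPI_Gather((void *)my_select, total_chunks, MPI_UNSIGNED_CHAR,
                   recv, total_chunks, MPI_UNSIGNED_CHAR, root, comm);

        /* step 3): the root decides the IO mode for each chunk */
        if(rank == root) {
            for(ic = 0; ic < total_chunks; ic++) {
                int nproc = 0;

                for(p = 0; p < size; p++)
                    nproc += recv[p * total_chunks + ic];
                io_mode[ic] = (nproc >= threshold_nproc) ? 1 : 0;
            }
            free(recv);
        }

        /* step 4): every rank receives the per-chunk decision */
        MPI_Bcast(io_mode, total_chunks, MPI_UNSIGNED_CHAR, root, comm);
        return 0;
    }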
-
+
Parameters:
Input: H5D_io_info_t* io_info,
@@ -1645,8 +1645,8 @@ done:
*-------------------------------------------------------------------------
*/
-static herr_t
-H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
+static herr_t
+H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
fm_map *fm,
uint8_t assign_io_mode[],
haddr_t chunk_addr[])
@@ -1661,7 +1661,7 @@ H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
uint8_t* mergebuf=NULL;
uint8_t* tempbuf;
- H5SL_node_t* chunk_node;
+ H5SL_node_t* chunk_node;
H5D_chunk_info_t* chunk_info;
MPI_Datatype bastype[2];
@@ -1688,7 +1688,7 @@ H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
FUNC_ENTER_NOAPI_NOINIT(H5D_obtain_mpio_mode)
/* Assign the rank 0 to the root */
- root = 0;
+ root = 0;
comm = io_info->comm;
/* Obtain the number of process and the current rank of the process */
@@ -1696,7 +1696,7 @@ H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
HGOTO_ERROR (H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi rank");
if((mpi_size = H5F_mpi_get_size(io_info->dset->oloc.file))<0)
HGOTO_ERROR (H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi size");
-
+
/* Allocate memory */
ori_total_chunks = fm->total_chunks;
H5_ASSIGN_OVERFLOW(total_chunks,ori_total_chunks,hsize_t,int);
@@ -1704,30 +1704,30 @@ H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
/* Obtain the data transfer properties */
if(NULL == (plist = H5I_object(io_info->dxpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file access property list")
-
+
percent_nproc_per_chunk=H5P_peek_unsigned(plist,H5D_XFER_MPIO_CHUNK_OPT_RATIO_NAME);
#if defined(H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS) && defined(H5_MPI_SPECIAL_COLLECTIVE_IO_WORKS)
-
+
chunk_opt_mode=(H5FD_mpio_chunk_opt_t)H5P_peek_unsigned(plist,H5D_XFER_MPIO_CHUNK_OPT_HARD_NAME);
if((chunk_opt_mode == H5FD_MPIO_CHUNK_MULTI_IO) || (percent_nproc_per_chunk == 0)){
if(H5D_istore_chunkmap(io_info,total_chunks,chunk_addr,fm->down_chunks)<0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address");
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address");
for(ic = 0; ic<total_chunks;ic++)
assign_io_mode[ic] = H5D_CHUNK_IO_MODE_COL;
goto done;
}
-#endif
+#endif
threshold_nproc_per_chunk = mpi_size * percent_nproc_per_chunk/100;
io_mode_info = (uint8_t *)H5MM_calloc(total_chunks*sizeof(MPI_BYTE));
mergebuf = H5MM_malloc((sizeof(haddr_t)+sizeof(MPI_BYTE))*total_chunks);
tempbuf = mergebuf + sizeof(MPI_BYTE)*total_chunks;
- if(mpi_rank == root)
+ if(mpi_rank == root)
recv_io_mode_info = (uint8_t *)H5MM_malloc(total_chunks*sizeof(MPI_BYTE)*mpi_size);
-
+
mem_cleanup = 1;
chunk_node = H5SL_first(fm->fsel);
@@ -1750,7 +1750,7 @@ H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
chunk_node = H5SL_next(chunk_node);
}
-
+
/*Create sent MPI derived datatype */
if(MPI_SUCCESS !=(mpi_code = MPI_Type_contiguous(total_chunks,MPI_BYTE,&stype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Comm_rank failed", mpi_code);
@@ -1764,7 +1764,7 @@ H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
basdisp[0] = 0;
basdisp[1] = (MPI_Aint)(sizeof(MPI_BYTE)*total_chunks);/* may need to check overflow */
bastype[0] = MPI_BYTE;
-
+
if(MPI_SUCCESS !=(mpi_code = MPI_Type_contiguous(sizeof(haddr_t),MPI_BYTE,&chunk_addrtype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code);
if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&chunk_addrtype)))
@@ -1792,7 +1792,7 @@ H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
int* ind_this_chunk;
#endif
- /* pre-computing: calculate number of processes and
+ /* pre-computing: calculate number of processes and
regularity of the selection occupied in each chunk */
nproc_per_chunk = (int*)H5MM_calloc(total_chunks*sizeof(int));
#if !defined(H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS) || !defined(H5_MPI_SPECIAL_COLLECTIVE_IO_WORKS)
@@ -1866,7 +1866,7 @@ H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
new_value = 0;
if(H5Pset(io_info->dxpl_id,H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME,&new_value)<0)
HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to set property value");
-#else
+#else
for(ic = 0; ic < total_chunks; ic++){
if(assign_io_mode[ic] == H5D_CHUNK_IO_MODE_COL) {
new_value = 0;
@@ -1893,7 +1893,7 @@ H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
}
}
#endif
-
+
done:
if(mpi_type_cleanup) {
@@ -1910,7 +1910,7 @@ done:
if(mem_cleanup){
HDfree(io_mode_info);
HDfree(mergebuf);
- if(mpi_rank == root)
+ if(mpi_rank == root)
HDfree(recv_io_mode_info);
}
@@ -1923,7 +1923,7 @@ H5D_cmp_chunk_addr(const void *chunk_addr_info1, const void *chunk_addr_info2)
haddr_t addr1, addr2;
FUNC_ENTER_NOAPI_NOINIT(H5D_cmp_chunk_addr)
-
+
addr1 = ((const H5D_chunk_addr_info_t *)chunk_addr_info1)->chunk_addr;
addr2 = ((const H5D_chunk_addr_info_t *)chunk_addr_info2)->chunk_addr;
diff --git a/src/H5Doh.c b/src/H5Doh.c
index 4da2381..ff42659 100644
--- a/src/H5Doh.c
+++ b/src/H5Doh.c
@@ -173,7 +173,7 @@ H5O_dset_free_copy_file_udata(void *_udata)
H5T_close(udata->src_dtype);
/* Release copy of dataset's filter pipeline, if it was set */
- if (udata->src_pline)
+ if (udata->src_pline)
H5O_free(H5O_PLINE_ID, udata->src_pline);
/* Release space for 'copy file' user data */
diff --git a/src/H5E.c b/src/H5E.c
index f7519bb..a891ee8 100644
--- a/src/H5E.c
+++ b/src/H5E.c
@@ -108,7 +108,7 @@ static ssize_t H5E_get_num(const H5E_t *err_stack);
static herr_t H5E_pop(H5E_t *err_stack, size_t count);
static herr_t H5E_clear_entries(H5E_t *estack, size_t nentries);
static herr_t H5E_print_stack(const H5E_t *estack, FILE *stream, hbool_t bk_compatible);
-static herr_t H5E_walk_stack(const H5E_t *estack, H5E_direction_t direction, H5E_walk_t func,
+static herr_t H5E_walk_stack(const H5E_t *estack, H5E_direction_t direction, H5E_walk_t func,
H5E_walk_stack_t stack_func, hbool_t bk_compatible, void *client_data);
static herr_t H5E_walk_cb(unsigned n, const H5E_error_t *err_desc, void *client_data);
static herr_t H5E_walk_stack_cb(unsigned n, const H5E_error_stack_t *err_desc, void *client_data);
@@ -1991,7 +1991,7 @@ H5E_print_stack(const H5E_t *estack, FILE *stream, hbool_t bk_compatible)
if(bk_compatible) {
if(H5E_walk_stack(estack, H5E_WALK_DOWNWARD, H5E_walk_cb, NULL, TRUE, (void*)&eprint)<0)
HGOTO_ERROR(H5E_ERROR, H5E_CANTLIST, FAIL, "can't walk error stack")
- } else {
+ } else {
if(H5E_walk_stack(estack, H5E_WALK_DOWNWARD, NULL, H5E_walk_stack_cb, FALSE, (void*)&eprint)<0)
HGOTO_ERROR(H5E_ERROR, H5E_CANTLIST, FAIL, "can't walk error stack")
}
@@ -2102,16 +2102,16 @@ done:
* means to start at the API and end at the inner-most function
* where the error was first detected.
*
- * The function pointed to by STACK_FUNC will be called for
- *              each error record in the error stack. Its arguments will
- * include an index number (beginning at zero regardless of
- * stack traversal direction), an error stack entry, and the
+ * The function pointed to by STACK_FUNC will be called for
+ *              each error record in the error stack. Its arguments will
+ * include an index number (beginning at zero regardless of
+ * stack traversal direction), an error stack entry, and the
* CLIENT_DATA pointer passed to H5E_print_stack.
*
* The function FUNC is also provided for backward compatibility.
* When BK_COMPATIBLE is set to be TRUE, FUNC is used to be
* compatible with older library. If BK_COMPATIBLE is FALSE,
- * STACK_FUNC is used.
+ * STACK_FUNC is used.
*
* Return: Non-negative on success/Negative on failure
*
@@ -2126,13 +2126,13 @@ done:
*
* Raymond Lu
* Friday, May 12, 2006
- * Added backward compatibility support. FUNC is for older
+ * Added backward compatibility support. FUNC is for older
* library; STACK_FUNC is for new library.
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5E_walk_stack(const H5E_t *estack, H5E_direction_t direction, H5E_walk_t func, H5E_walk_stack_t stack_func,
+H5E_walk_stack(const H5E_t *estack, H5E_direction_t direction, H5E_walk_t func, H5E_walk_stack_t stack_func,
hbool_t bk_compatible, void *client_data)
{
int i; /* Local index variable */
@@ -2216,9 +2216,9 @@ done:
* Purpose: This is a default error stack traversal callback function
* that prints error messages to the specified output stream.
* It is not meant to be called directly but rather as an
- * argument to the H5Ewalk_stack() function. This function is
- * called also by H5Eprint_stack(). Application writers are
- * encouraged to use this function as a model for their own
+ * argument to the H5Ewalk_stack() function. This function is
+ * called also by H5Eprint_stack(). Application writers are
+ * encouraged to use this function as a model for their own
* error stack walking functions.
*
* N is a counter for how many times this function has been
@@ -2351,7 +2351,7 @@ H5E_walk_stack_cb(unsigned n, const H5E_error_stack_t *err_desc, void *client_da
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Raymond Lu
+ * Programmer: Raymond Lu
* Thursday, May 11, 2006
*
* Modifications:
diff --git a/src/H5Eterm.h b/src/H5Eterm.h
index c2c206e..0b3a85e 100644
--- a/src/H5Eterm.h
+++ b/src/H5Eterm.h
@@ -20,180 +20,180 @@
#define _H5Eterm_H
/* Reset major error IDs */
-
-H5E_DATASET_g=
-H5E_FUNC_g=
-H5E_STORAGE_g=
-H5E_FILE_g=
-H5E_SYM_g=
-H5E_VFL_g=
-H5E_INTERNAL_g=
-H5E_BTREE_g=
-H5E_REFERENCE_g=
-H5E_DATASPACE_g=
-H5E_RESOURCE_g=
-H5E_PLIST_g=
-H5E_DATATYPE_g=
-H5E_RS_g=
-H5E_HEAP_g=
-H5E_OHDR_g=
-H5E_ATOM_g=
-H5E_ATTR_g=
-H5E_NONE_MAJOR_g=
-H5E_IO_g=
-H5E_SLIST_g=
-H5E_EFL_g=
-H5E_TST_g=
-H5E_ARGS_g=
-H5E_ERROR_g=
-H5E_PLINE_g=
-H5E_FSPACE_g=
+
+H5E_DATASET_g=
+H5E_FUNC_g=
+H5E_STORAGE_g=
+H5E_FILE_g=
+H5E_SYM_g=
+H5E_VFL_g=
+H5E_INTERNAL_g=
+H5E_BTREE_g=
+H5E_REFERENCE_g=
+H5E_DATASPACE_g=
+H5E_RESOURCE_g=
+H5E_PLIST_g=
+H5E_DATATYPE_g=
+H5E_RS_g=
+H5E_HEAP_g=
+H5E_OHDR_g=
+H5E_ATOM_g=
+H5E_ATTR_g=
+H5E_NONE_MAJOR_g=
+H5E_IO_g=
+H5E_SLIST_g=
+H5E_EFL_g=
+H5E_TST_g=
+H5E_ARGS_g=
+H5E_ERROR_g=
+H5E_PLINE_g=
+H5E_FSPACE_g=
H5E_CACHE_g= (-1);
/* Reset minor error IDs */
-/* Generic low-level file I/O errors */
-H5E_SEEKERROR_g=
-H5E_READERROR_g=
-H5E_WRITEERROR_g=
-H5E_CLOSEERROR_g=
-H5E_OVERFLOW_g=
+/* Generic low-level file I/O errors */
+H5E_SEEKERROR_g=
+H5E_READERROR_g=
+H5E_WRITEERROR_g=
+H5E_CLOSEERROR_g=
+H5E_OVERFLOW_g=
H5E_FCNTL_g=
-/* Resource errors */
-H5E_NOSPACE_g=
-H5E_CANTALLOC_g=
-H5E_CANTCOPY_g=
-H5E_CANTFREE_g=
-H5E_ALREADYEXISTS_g=
-H5E_CANTLOCK_g=
-H5E_CANTUNLOCK_g=
-H5E_CANTGC_g=
+/* Resource errors */
+H5E_NOSPACE_g=
+H5E_CANTALLOC_g=
+H5E_CANTCOPY_g=
+H5E_CANTFREE_g=
+H5E_ALREADYEXISTS_g=
+H5E_CANTLOCK_g=
+H5E_CANTUNLOCK_g=
+H5E_CANTGC_g=
H5E_CANTGETSIZE_g=
-/* Heap errors */
-H5E_CANTRESTORE_g=
-H5E_CANTCOMPUTE_g=
-H5E_CANTEXTEND_g=
-H5E_CANTATTACH_g=
+/* Heap errors */
+H5E_CANTRESTORE_g=
+H5E_CANTCOMPUTE_g=
+H5E_CANTEXTEND_g=
+H5E_CANTATTACH_g=
H5E_CANTUPDATE_g=
-/* Function entry/exit interface errors */
-H5E_CANTINIT_g=
-H5E_ALREADYINIT_g=
+/* Function entry/exit interface errors */
+H5E_CANTINIT_g=
+H5E_ALREADYINIT_g=
H5E_CANTRELEASE_g=
-/* Property list errors */
-H5E_CANTGET_g=
-H5E_CANTSET_g=
+/* Property list errors */
+H5E_CANTGET_g=
+H5E_CANTSET_g=
H5E_DUPCLASS_g=
-/* Free space errors */
-H5E_CANTMERGE_g=
-H5E_CANTREVIVE_g=
+/* Free space errors */
+H5E_CANTMERGE_g=
+H5E_CANTREVIVE_g=
H5E_CANTSHRINK_g=
-/* Object header related errors */
-H5E_LINKCOUNT_g=
-H5E_VERSION_g=
-H5E_ALIGNMENT_g=
-H5E_BADMESG_g=
-H5E_CANTDELETE_g=
-H5E_BADITER_g=
+/* Object header related errors */
+H5E_LINKCOUNT_g=
+H5E_VERSION_g=
+H5E_ALIGNMENT_g=
+H5E_BADMESG_g=
+H5E_CANTDELETE_g=
+H5E_BADITER_g=
H5E_CANTPACK_g=
-/* System level errors */
+/* System level errors */
H5E_SYSERRSTR_g=
-/* I/O pipeline errors */
-H5E_NOFILTER_g=
-H5E_CALLBACK_g=
-H5E_CANAPPLY_g=
-H5E_SETLOCAL_g=
+/* I/O pipeline errors */
+H5E_NOFILTER_g=
+H5E_CALLBACK_g=
+H5E_CANAPPLY_g=
+H5E_SETLOCAL_g=
H5E_NOENCODER_g=
-/* Group related errors */
-H5E_CANTOPENOBJ_g=
-H5E_CANTCLOSEOBJ_g=
-H5E_COMPLEN_g=
-H5E_LINK_g=
-H5E_SLINK_g=
+/* Group related errors */
+H5E_CANTOPENOBJ_g=
+H5E_CANTCLOSEOBJ_g=
+H5E_COMPLEN_g=
+H5E_LINK_g=
+H5E_SLINK_g=
H5E_PATH_g=
-/* No error */
+/* No error */
H5E_NONE_MINOR_g=
-/* File accessibility errors */
-H5E_FILEEXISTS_g=
-H5E_FILEOPEN_g=
-H5E_CANTCREATE_g=
-H5E_CANTOPENFILE_g=
-H5E_CANTCLOSEFILE_g=
-H5E_NOTHDF5_g=
-H5E_BADFILE_g=
-H5E_TRUNCATED_g=
+/* File accessibility errors */
+H5E_FILEEXISTS_g=
+H5E_FILEOPEN_g=
+H5E_CANTCREATE_g=
+H5E_CANTOPENFILE_g=
+H5E_CANTCLOSEFILE_g=
+H5E_NOTHDF5_g=
+H5E_BADFILE_g=
+H5E_TRUNCATED_g=
H5E_MOUNT_g=
-/* Object atom related errors */
-H5E_BADATOM_g=
-H5E_BADGROUP_g=
-H5E_CANTREGISTER_g=
-H5E_CANTINC_g=
-H5E_CANTDEC_g=
+/* Object atom related errors */
+H5E_BADATOM_g=
+H5E_BADGROUP_g=
+H5E_CANTREGISTER_g=
+H5E_CANTINC_g=
+H5E_CANTDEC_g=
H5E_NOIDS_g=
-/* Cache related errors */
-H5E_CANTFLUSH_g=
-H5E_CANTSERIALIZE_g=
-H5E_CANTLOAD_g=
-H5E_PROTECT_g=
-H5E_NOTCACHED_g=
-H5E_SYSTEM_g=
-H5E_CANTINS_g=
-H5E_CANTRENAME_g=
-H5E_CANTPROTECT_g=
-H5E_CANTUNPROTECT_g=
-H5E_CANTPIN_g=
-H5E_CANTUNPIN_g=
-H5E_CANTMARKDIRTY_g=
+/* Cache related errors */
+H5E_CANTFLUSH_g=
+H5E_CANTSERIALIZE_g=
+H5E_CANTLOAD_g=
+H5E_PROTECT_g=
+H5E_NOTCACHED_g=
+H5E_SYSTEM_g=
+H5E_CANTINS_g=
+H5E_CANTRENAME_g=
+H5E_CANTPROTECT_g=
+H5E_CANTUNPROTECT_g=
+H5E_CANTPIN_g=
+H5E_CANTUNPIN_g=
+H5E_CANTMARKDIRTY_g=
H5E_CANTDIRTY_g=
-/* Parallel MPI errors */
-H5E_MPI_g=
-H5E_MPIERRSTR_g=
+/* Parallel MPI errors */
+H5E_MPI_g=
+H5E_MPIERRSTR_g=
H5E_CANTRECV_g=
-/* Dataspace errors */
-H5E_CANTCLIP_g=
-H5E_CANTCOUNT_g=
-H5E_CANTSELECT_g=
-H5E_CANTNEXT_g=
-H5E_BADSELECT_g=
+/* Dataspace errors */
+H5E_CANTCLIP_g=
+H5E_CANTCOUNT_g=
+H5E_CANTSELECT_g=
+H5E_CANTNEXT_g=
+H5E_BADSELECT_g=
H5E_CANTCOMPARE_g=
-/* B-tree related errors */
-H5E_NOTFOUND_g=
-H5E_EXISTS_g=
-H5E_CANTENCODE_g=
-H5E_CANTDECODE_g=
-H5E_CANTSPLIT_g=
-H5E_CANTREDISTRIBUTE_g=
-H5E_CANTSWAP_g=
-H5E_CANTINSERT_g=
-H5E_CANTLIST_g=
-H5E_CANTMODIFY_g=
+/* B-tree related errors */
+H5E_NOTFOUND_g=
+H5E_EXISTS_g=
+H5E_CANTENCODE_g=
+H5E_CANTDECODE_g=
+H5E_CANTSPLIT_g=
+H5E_CANTREDISTRIBUTE_g=
+H5E_CANTSWAP_g=
+H5E_CANTINSERT_g=
+H5E_CANTLIST_g=
+H5E_CANTMODIFY_g=
H5E_CANTREMOVE_g=
-/* Argument errors */
-H5E_UNINITIALIZED_g=
-H5E_UNSUPPORTED_g=
-H5E_BADTYPE_g=
-H5E_BADRANGE_g=
+/* Argument errors */
+H5E_UNINITIALIZED_g=
+H5E_UNSUPPORTED_g=
+H5E_BADTYPE_g=
+H5E_BADRANGE_g=
H5E_BADVALUE_g=
-/* Datatype conversion errors */
-H5E_CANTCONVERT_g=
+/* Datatype conversion errors */
+H5E_CANTCONVERT_g=
H5E_BADSIZE_g= (-1);
#endif /* H5Eterm_H */
diff --git a/src/H5F.c b/src/H5F.c
index 0d80e51..24f4e9c 100644
--- a/src/H5F.c
+++ b/src/H5F.c
@@ -1428,8 +1428,8 @@ done:
* property list changes.
*
* J Mainzer, Jun 30, 2005
- * Added lf parameter so the shared->lf field can be
- * initialized prior to the call to H5AC_create() if a
+ * Added lf parameter so the shared->lf field can be
+ * initialized prior to the call to H5AC_create() if a
* new instance of H5F_file_t is created. lf should be
 *              NULL if shared isn't, and vice versa.
*
@@ -1458,7 +1458,7 @@ H5F_new(H5F_file_t *shared, hid_t fcpl_id, hid_t fapl_id, H5FD_t *lf)
f->shared->freespace_addr = HADDR_UNDEF;
f->shared->driver_addr = HADDR_UNDEF;
f->shared->lf = lf;
-
+
/*
* Copy the file creation and file access property lists into the
* new file handle. We do this early because some values might need
diff --git a/src/H5FDfamily.c b/src/H5FDfamily.c
index 0e4a453..c826163 100644
--- a/src/H5FDfamily.c
+++ b/src/H5FDfamily.c
@@ -60,7 +60,7 @@ typedef struct H5FD_family_t {
hid_t memb_fapl_id; /*file access property list for members */
hsize_t memb_size; /*actual size of each member file */
hsize_t pmem_size; /*member size passed in from property */
- hsize_t mem_newsize; /*new member size passed in as private
+ hsize_t mem_newsize; /*new member size passed in as private
*property. It's used only by h5repart */
unsigned nmembs; /*number of family members */
unsigned amembs; /*number of member slots allocated */
diff --git a/src/H5FDmpi.h b/src/H5FDmpi.h
index b7b4162..6c2a2c5 100644
--- a/src/H5FDmpi.h
+++ b/src/H5FDmpi.h
@@ -21,8 +21,8 @@
#ifndef H5FDmpi_H
#define H5FDmpi_H
-/***** Macros for One linked collective IO case. *****/
-/* The default value to do one linked collective IO for all chunks.
+/***** Macros for One linked collective IO case. *****/
+/* The default value to do one linked collective IO for all chunks.
If the average number of chunks per process is greater than this value,
the library will create an MPI derived datatype to link all chunks to do collective IO.
The user can set this value through an API. */
diff --git a/src/H5FDmpio.c b/src/H5FDmpio.c
index f639182..0be55e3 100644
--- a/src/H5FDmpio.c
+++ b/src/H5FDmpio.c
@@ -543,14 +543,14 @@ Description:
        The library will deviate from the requested behavior only when it finds
        that the low-level MPI-IO package doesn't support it.
-Parameters:
+Parameters:
hid_t dxpl_id in: Data transfer property list identifier
H5FD_mpio_chunk_opt_t in: The optimization flag for linked chunk IO
or multi-chunk IO.
-
-Returns:
-Returns a non-negative value if successful. Otherwise returns a negative value.
+
+Returns:
+Returns a non-negative value if successful. Otherwise returns a negative value.
*
*-------------------------------------------------------------------------
*/
@@ -590,15 +590,15 @@ Purpose:
To set a threshold for doing linked chunk IO
Description:
- If the number is greater than the threshold set by the user,
+ If the number is greater than the threshold set by the user,
the library will do linked chunk IO; otherwise, IO will be done for every chunk.
-Parameters:
+Parameters:
hid_t dxpl_id in: Data transfer property list identifier
- unsigned num_proc_per_chunk in: the threshold of the average number of chunks selected by each process
+ unsigned num_proc_per_chunk in: the threshold of the average number of chunks selected by each process
-Returns:
-Returns a non-negative value if successful. Otherwise returns a negative value.
+Returns:
+Returns a non-negative value if successful. Otherwise returns a negative value.
*
*-------------------------------------------------------------------------
*/
@@ -637,13 +637,13 @@ Purpose:
To set a threshold for doing collective IO for each chunk
Description:
      The library will calculate the percentage of processes holding selections in each chunk. If that percentage for an individual chunk is greater than the threshold set by the user, the library will do collective IO for this chunk; otherwise, independent IO will be done for this chunk.
-Parameters:
- hid_t dxpl_id
+Parameters:
+ hid_t dxpl_id
in: Data transfer property list identifier
- unsigned percent_num_proc_per_chunk
+ unsigned percent_num_proc_per_chunk
in: the threshold of the percentage of the number of process holding selections per chunk
-Returns:
-Returns a non-negative value if successful. Otherwise returns a negative value.
+Returns:
+Returns a non-negative value if successful. Otherwise returns a negative value.
*
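Taken together, the three settings described above are data-transfer properties set on a dataset transfer property list. A hedged usage sketch; the wrapper names below (H5Pset_dxpl_mpio_chunk_opt, H5Pset_dxpl_mpio_chunk_opt_num, H5Pset_dxpl_mpio_chunk_opt_ratio) follow later HDF5 releases and are assumed here, since the public prototypes are not shown in this diff:

    #include "hdf5.h"

    /* Sketch only: the H5Pset_dxpl_mpio_chunk_opt* names are assumptions,
     * not confirmed by this diff. */
    static hid_t make_chunk_opt_dxpl(void)
    {
        hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

        H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);               /* collective transfer       */
        H5Pset_dxpl_mpio_chunk_opt(dxpl, H5FD_MPIO_CHUNK_MULTI_IO); /* force multi-chunk IO      */
        H5Pset_dxpl_mpio_chunk_opt_num(dxpl, 4);                    /* linked-chunk IO threshold */
        H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl, 50);                 /* 50% of ranks per chunk    */

        return dxpl;    /* pass to H5Dread()/H5Dwrite(); release with H5Pclose() */
    }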
@@ -1701,7 +1701,7 @@ H5FD_mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr,
/* The metadata cache now only writes from process 0, which makes
* this synchronization incorrect. I'm leaving this code commented
* out instead of deleting it to remind us that we should re-write
- * this function so that a metadata write from any other process
+ * this function so that a metadata write from any other process
* should flag an error.
* -- JRM 9/1/05
*/
@@ -1772,13 +1772,13 @@ done:
#if 0 /* JRM */
/* Since metadata writes are now done by process 0 only, this broadcast
- * is no longer needed. I leave it in and commented out to remind us
+ * is no longer needed. I leave it in and commented out to remind us
 *             that we need to re-work this function to reflect this reality.
*
* -- JRM 9/1/05
*/
- /* if only one process writes, need to broadcast the ret_value to
- * other processes
+ /* if only one process writes, need to broadcast the ret_value to
+ * other processes
*/
if (type!=H5FD_MEM_DRAW) {
if (MPI_SUCCESS != (mpi_code=MPI_Bcast(&ret_value, sizeof(ret_value), MPI_BYTE, H5_PAR_META_WRITE, file->comm)))
diff --git a/src/H5FDmpiposix.c b/src/H5FDmpiposix.c
index de491f0..2809539 100644
--- a/src/H5FDmpiposix.c
+++ b/src/H5FDmpiposix.c
@@ -913,7 +913,7 @@ done:
* John Mainzer -- 9/21/05
* Modified code to turn off the
* H5FD_FEAT_ACCUMULATE_METADATA_WRITE flag.
- * With the movement of all cache writes to process 0,
+ * With the movement of all cache writes to process 0,
* this flag has become problematic in PHDF5.
*
*-------------------------------------------------------------------------
@@ -1233,10 +1233,10 @@ H5FD_mpiposix_write(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr,
/* Metadata specific actions */
/* All metadata is now written from process 0 -- thus this function
- * needs to be re-written to reflect this. For now I have simply
- * commented out the code that attempts to synchronize metadata
+ * needs to be re-written to reflect this. For now I have simply
+ * commented out the code that attempts to synchronize metadata
* writes between processes, but we should really just flag an error
- * whenever any process other than process 0 attempts to write
+ * whenever any process other than process 0 attempts to write
* metadata.
* -- JRM 9/1/05
*/
@@ -1394,7 +1394,7 @@ H5FD_mpiposix_flush(H5FD_t *_file, hid_t UNUSED dxpl_id, unsigned UNUSED closing
if(file->mpi_rank == H5_PAR_META_WRITE) {
#ifdef WIN32
/* Map the posix file handle to a Windows file handle */
- filehandle = _get_osfhandle(file->fd);
+ filehandle = _get_osfhandle(file->fd);
/* Translate 64-bit integers into form Windows wants */
/* [This algorithm is from the Windows documentation for SetFilePointer()] */
diff --git a/src/H5FDmulti.c b/src/H5FDmulti.c
index 652bd53..e7de49f 100644
--- a/src/H5FDmulti.c
+++ b/src/H5FDmulti.c
@@ -1631,7 +1631,7 @@ H5FD_multi_alloc(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, hsize_t size)
if ( addr + size > file->eoa ) {
if ( H5FD_multi_set_eoa(_file, addr + size) < 0 ) {
-
+
H5Epush_ret(func, H5E_ERR_CLS, H5E_INTERNAL, H5E_BADVALUE, \
"can't set eoa", HADDR_UNDEF)
}
diff --git a/src/H5FS.c b/src/H5FS.c
index 87fb4dd..5094405 100644
--- a/src/H5FS.c
+++ b/src/H5FS.c
@@ -1342,7 +1342,7 @@ HDfprintf(stderr, "%s: sect->size = %Hu, sect->addr = %a, sect->type = %u\n", FU
#ifdef QAK
HDfprintf(stderr, "%s: Returning space\n", FUNC);
#endif /* QAK */
-
+
/* Attempt to merge returned section with existing sections */
if(H5FS_sect_merge(f, dxpl_id, fspace, &sect, op_data) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTMERGE, FAIL, "can't merge sections")
diff --git a/src/H5FSprivate.h b/src/H5FSprivate.h
index 281bb57..4ce9d88 100644
--- a/src/H5FSprivate.h
+++ b/src/H5FSprivate.h
@@ -117,7 +117,7 @@ typedef herr_t (*H5FS_operator_t)(const H5FS_section_info_t *sect,
/*****************************/
/* Library-private Variables */
/*****************************/
-
+
/* Declare a free list to manage the H5FS_section_class_t sequence information */
H5FL_SEQ_EXTERN(H5FS_section_class_t);
diff --git a/src/H5G.c b/src/H5G.c
index 34d3c56..c3e300a 100644
--- a/src/H5G.c
+++ b/src/H5G.c
@@ -292,9 +292,9 @@ done:
*
* Usage: H5Gcreate_expand(loc_id, char *name, gcpl_id, gapl_id)
* hid_t loc_id; IN: File or group identifier
- * const char *name; IN: Absolute or relative name of the new group
- * hid_t gcpl_id; IN: Property list for group creation
- * hid_t gapl_id; IN: Property list for group access
+ * const char *name; IN: Absolute or relative name of the new group
+ * hid_t gcpl_id; IN: Property list for group creation
+ * hid_t gapl_id; IN: Property list for group access
*
* Example: To create missing groups "A" and "B01" along the given path "/A/B01/grp"
* hid_t create_id = H5Pcreate(H5P_GROUP_CREATE);
@@ -1066,23 +1066,23 @@ done:
/*-------------------------------------------------------------------------
* Function: H5Gcopy
*
- * Purpose: Copy an object (group or dataset) to destination location
+ * Purpose: Copy an object (group or dataset) to destination location
* within a file or cross files. PLIST_ID is a property list
* which is used to pass user options and properties to the
- * copy.
+ * copy.
*
* OPTIONS THAT MAY APPLY TO COPY IN THE FUTURE.
* H5G_COPY_CREATE_INTERMEDIATE_GROUP_FLAG
 *                      Do not create missing groups when creating a group (default)
 *                      Create missing groups when creating a group
* H5G_COPY_SHALLOW_HIERARCHY_FLAG
- * Recursively copy all objects below the group (default)
+ * Recursively copy all objects below the group (default)
* Only immediate members.
* H5G_COPY_EXPAND_SOFT_LINK_FLAG
- * Keep soft links as they are (default)
+ * Keep soft links as they are (default)
* Expand them into new objects
* H5G_COPY_EXPAND_EXT_LINK_FLAG
- * Keep external links as they are (default)
+ * Keep external links as they are (default)
* Expand them into new objects
* H5G_COPY_EXPAND_OBJ_REFERENCE_FLAG
* Update only the values of object references (default)
@@ -1094,26 +1094,26 @@ done:
* PROPERTIES THAT MAY APPLY TO COPY IN FUTURE
* Change data layout such as chunk size
* Add filter such as data compression.
- * Add an attribute to the copied object(s) that say the date/time
+ * Add an attribute to the copied object(s) that say the date/time
* for the copy or other information about the source file.
*
* Usage: H5Gcopy(src_loc_id, src_name, dst_loc_id, dst_name, plist_id)
- * hid_t src_loc_id IN: Source file or group identifier.
+ * hid_t src_loc_id IN: Source file or group identifier.
* const char *src_name IN: Name of the source object to be copied
- * hid_t dst_loc_id IN: Destination file or group identifier
- * const char *dst_name IN: Name of the destination object
- * hid_t plist_id IN: Properties which apply to the copy
- *
+ * hid_t dst_loc_id IN: Destination file or group identifier
+ * const char *dst_name IN: Name of the destination object
+ * hid_t plist_id IN: Properties which apply to the copy
+ *
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Peter Cao
- * June 4, 2005
+ * Programmer: Peter Cao
+ * June 4, 2005
*
*-------------------------------------------------------------------------
*/
herr_t
-H5Gcopy(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id,
+H5Gcopy(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id,
const char *dst_name, hid_t plist_id)
{
    H5G_loc_t   loc;                /* Source group location */
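Following the Usage line above, a minimal sketch of a cross-file copy with default properties (file and object names are illustrative only):

    #include "hdf5.h"

    /* Sketch only: copy object "/A" from one file to "/A_copy" in another. */
    static int copy_object_example(void)
    {
        hid_t src = H5Fopen("src.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
        hid_t dst = H5Fcreate("dst.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        int   ret = 0;

        if(src < 0 || dst < 0 || H5Gcopy(src, "/A", dst, "/A_copy", H5P_DEFAULT) < 0)
            ret = -1;

        if(dst >= 0) H5Fclose(dst);
        if(src >= 0) H5Fclose(src);
        return ret;
    }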
@@ -2987,12 +2987,12 @@ H5G_unmount(H5G_t *grp)
/*-------------------------------------------------------------------------
* Function: H5G_copy
*
- * Purpose: Copy an object to destination location
+ * Purpose: Copy an object to destination location
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Peter Cao
- * June 4, 2005
+ * Programmer: Peter Cao
+ * June 4, 2005
*
*-------------------------------------------------------------------------
*/
diff --git a/src/H5Gloc.c b/src/H5Gloc.c
index a9e907f..57212b7 100644
--- a/src/H5Gloc.c
+++ b/src/H5Gloc.c
@@ -37,7 +37,7 @@
/* User data for looking up an object in a group */
typedef struct {
- H5G_loc_t *loc; /* Group location to set */
+ H5G_loc_t *loc; /* Group location to set */
} H5G_loc_ud1_t;
/* Private macros */
diff --git a/src/H5Gname.c b/src/H5Gname.c
index cc4e77b..fdae809 100644
--- a/src/H5Gname.c
+++ b/src/H5Gname.c
@@ -530,7 +530,7 @@ H5G_name_move_path(H5RS_str_t **path_r_ptr, const char *full_suffix, const char
/* Get pointer to path to update */
path = H5RS_get_str(*path_r_ptr);
HDassert(path);
-
+
/* Check if path needs to be updated */
full_suffix_len = HDstrlen(full_suffix);
path_len = HDstrlen(path);
diff --git a/src/H5Gnode.c b/src/H5Gnode.c
index d6915ea..90ee688 100644
--- a/src/H5Gnode.c
+++ b/src/H5Gnode.c
@@ -1897,7 +1897,7 @@ H5G_node_shared_free (void *_shared)
*
* Return: 0(zero) on success/Negative on failure
*
- * Programmer: Peter Cao
+ * Programmer: Peter Cao
* Sept 10, 2005
*
*-------------------------------------------------------------------------
@@ -1997,7 +1997,7 @@ H5G_node_copy(H5F_t *f, hid_t dxpl_id, const void UNUSED *_lt_key, haddr_t addr,
lnk.type = H5G_LINK_SOFT;
lnk.u.soft.name = H5HL_offset_into(f, heap, src_ent->cache.slink.lval_offset);
} /* else if */
- else
+ else
HDassert(0 && "Unknown entry type");
/* Set up common link data */
@@ -2023,7 +2023,7 @@ H5G_node_copy(H5F_t *f, hid_t dxpl_id, const void UNUSED *_lt_key, haddr_t addr,
if(H5G_stab_insert_real(udata->dst_file, udata->dst_stab, name, &lnk, dxpl_id) < 0)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, H5B_ITER_ERROR, "unable to insert the name")
} /* end of for (i=0; i<sn->nsyms; i++) */
-
+
done:
if (heap && H5HL_unprotect(f, dxpl_id, heap, udata->src_heap_addr, H5AC__NO_FLAGS_SET) < 0)
HDONE_ERROR(H5E_SYM, H5E_PROTECT, H5B_ITER_ERROR, "unable to unprotect symbol name")
diff --git a/src/H5Gobj.c b/src/H5Gobj.c
index 8846927..88b8cdf 100644
--- a/src/H5Gobj.c
+++ b/src/H5Gobj.c
@@ -48,7 +48,7 @@ typedef struct {
/* User data for looking up an object in a group */
typedef struct {
H5O_link_t *lnk; /* Link information to set for object */
- H5O_loc_t *oloc; /* Object location to set */
+ H5O_loc_t *oloc; /* Object location to set */
} H5G_obj_ud2_t;
/* Private macros */
@@ -119,7 +119,7 @@ H5G_obj_create(H5F_t *f, hid_t dxpl_id,
HDassert(link_size);
/* Compute size of header to use for creation */
- hdr_size = linfo_size +
+ hdr_size = linfo_size +
ginfo_size +
(ginfo->est_num_entries * (link_size + ginfo->est_name_len));
#else /* H5_GROUP_REVISION */
diff --git a/src/H5Gpublic.h b/src/H5Gpublic.h
index fd322a6..9300e8d 100644
--- a/src/H5Gpublic.h
+++ b/src/H5Gpublic.h
@@ -149,7 +149,7 @@ H5_DLL hid_t H5Gcreate_expand(hid_t loc_id, const char *name, hid_t gcpl_id,
hid_t gapl_id);
H5_DLL hid_t H5Gget_create_plist(hid_t group_id);
#endif /* H5_GROUP_REVISION */
-H5_DLL herr_t H5Gcopy(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id,
+H5_DLL herr_t H5Gcopy(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id,
const char *dst_name, hid_t plist_id);
#ifdef __cplusplus
diff --git a/src/H5HFcache.c b/src/H5HFcache.c
index d896a3e..15e0605 100644
--- a/src/H5HFcache.c
+++ b/src/H5HFcache.c
@@ -650,7 +650,7 @@ H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_size,
H5F_addr_decode(f, &p, &heap_addr);
if(H5F_addr_ne(heap_addr, dblock->hdr->heap_addr))
HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "incorrect heap header address for direct block")
-
+
/* Address of parent block */
dblock->parent = par_info->iblock;
dblock->par_entry = par_info->entry;
@@ -959,7 +959,7 @@ HDfprintf(stderr, "%s: Load indirect block, addr = %a\n", FUNC, addr);
H5F_addr_decode(f, &p, &heap_addr);
if(H5F_addr_ne(heap_addr, iblock->hdr->heap_addr))
HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "incorrect heap header address for direct block")
-
+
/* Address of parent block */
iblock->parent = par_info->iblock;
iblock->par_entry = par_info->entry;
diff --git a/src/H5HFhdr.c b/src/H5HFhdr.c
index 836d1e6..5aefe7b 100644
--- a/src/H5HFhdr.c
+++ b/src/H5HFhdr.c
@@ -95,7 +95,7 @@ H5FL_DEFINE(H5HF_hdr_t);
/*-------------------------------------------------------------------------
* Function: H5HF_hdr_alloc
*
- * Purpose: Allocate shared fractal heap header
+ * Purpose: Allocate shared fractal heap header
*
* Return: Non-negative on success/Negative on failure
*
diff --git a/src/H5HFiter.c b/src/H5HFiter.c
index ef926e8..94f0b8b 100644
--- a/src/H5HFiter.c
+++ b/src/H5HFiter.c
@@ -166,7 +166,7 @@ HDfprintf(stderr, "%s: offset = %Hu\n", FUNC, offset);
<Adjust offset for block offset for row>
<Make new block level the current context>
<Goto 1>
-
+
*/
do {
/* Walk down the rows in the doubling table until we've found the correct row for the next block */
diff --git a/src/H5HFprivate.h b/src/H5HFprivate.h
index b9ae0f4..4379dee 100644
--- a/src/H5HFprivate.h
+++ b/src/H5HFprivate.h
@@ -85,7 +85,7 @@ typedef struct H5HF_t H5HF_t;
/*****************************/
/* Library-private Variables */
/*****************************/
-
+
/***************************************/
/* Library-private Function Prototypes */
/***************************************/
diff --git a/src/H5HL.c b/src/H5HL.c
index e86be3d..cdfe49b 100644
--- a/src/H5HL.c
+++ b/src/H5HL.c
@@ -319,16 +319,16 @@ done:
*
* John Mainzer, 8/10/05
* Reworked this function for a different role.
- *
- * It used to be called during cache eviction, where it
- * attempted to size the disk space allocation for the
+ *
+ * It used to be called during cache eviction, where it
+ * attempted to size the disk space allocation for the
* actual size of the heap. However, this causes problems
 *              in the parallel case, as the resulting disk allocations
* may not be synchronized.
*
- * It is now called from H5HL_remove(), where it is used to
+ * It is now called from H5HL_remove(), where it is used to
* reduce heap size in response to an entry deletion. This
- * means that the function should either do nothing, or
+ * means that the function should either do nothing, or
* reduce the size of the disk allocation.
*
*-------------------------------------------------------------------------
@@ -546,8 +546,8 @@ H5HL_serialize(H5F_t *f, H5HL_t *heap, uint8_t *buf)
*
* John Mainzer, 2005-08-10
* Removed call to H5HL_minimize_heap_space(). It does disk space
- * allocation, which can cause problems if done at flush time.
- * Instead, disk space allocation/deallocation is now done at
+ * allocation, which can cause problems if done at flush time.
+ * Instead, disk space allocation/deallocation is now done at
* insert/remove time.
*
*-------------------------------------------------------------------------
@@ -956,7 +956,7 @@ H5HL_remove_free(H5HL_t *heap, H5HL_free_t *fl)
*
* John Mainzer, 8/10/05
* Modified code to allocate file space as needed, instead
- * of allocating it on eviction.
+ * of allocating it on eviction.
*
*-------------------------------------------------------------------------
*/
@@ -1068,9 +1068,9 @@ H5HL_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr, size_t buf_size, const void *
else { /* ...if we can't, allocate a new chunk & release the old */
haddr_t new_addr;
-            /* The new allocation may fail -- to avoid the possibility of
+            /* The new allocation may fail -- to avoid the possibility of
* file corruption, allocate the new heap first, and then
- * deallocate the old.
+ * deallocate the old.
*/
/* allocate new disk space for the heap */
diff --git a/src/H5O.c b/src/H5O.c
index 18eee25..14b0b09 100644
--- a/src/H5O.c
+++ b/src/H5O.c
@@ -140,7 +140,7 @@ const H5O_msg_class_t *const H5O_msg_class_g[] = {
/* Header object ID to class mapping */
/*
* Initialize the object class info table. Begin with the most general types
- * and end with the most specific. For instance, any object that has a
+ * and end with the most specific. For instance, any object that has a
* datatype message is a datatype but only some of them are datasets.
*/
const H5O_obj_class_t *const H5O_obj_class_g[] = {
@@ -221,9 +221,9 @@ static herr_t H5O_iterate_real(const H5O_loc_t *loc, const H5O_msg_class_t *type
H5AC_protect_t prot, hbool_t internal, void *op, void *op_data, hid_t dxpl_id);
static H5G_obj_t H5O_obj_type_real(H5O_t *oh);
static const H5O_obj_class_t *H5O_obj_class(H5O_t *oh);
-static void * H5O_copy_mesg_file(const H5O_msg_class_t *type, H5F_t *file_src, void *mesg_src,
+static void * H5O_copy_mesg_file(const H5O_msg_class_t *type, H5F_t *file_src, void *mesg_src,
H5F_t *file_dst, hid_t dxpl_id, H5O_copy_t *cpy_info, void *udata);
-static herr_t H5O_copy_header_real(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out */,
+static herr_t H5O_copy_header_real(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out */,
hid_t dxpl_id, H5O_copy_t *cpy_info);
static herr_t H5O_copy_free_addrmap_cb(void *item, void *key, void *op_data);
@@ -1674,10 +1674,10 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5O_touch_oh(H5F_t *f,
- hid_t dxpl_id,
- H5O_t *oh,
- hbool_t force,
+H5O_touch_oh(H5F_t *f,
+ hid_t dxpl_id,
+ H5O_t *oh,
+ hbool_t force,
unsigned * oh_flags_ptr)
{
unsigned idx;
@@ -2117,7 +2117,7 @@ done:
} /* end H5O_remove_real() */
-/*-------------------------------------------------------------------------
+/*-------------------------------------------------------------------------
*
* Function: H5O_alloc_msgs
*
@@ -2140,7 +2140,7 @@ H5O_alloc_msgs(H5O_t *oh, size_t min_alloc)
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5O_alloc_msgs)
-
+
/* check args */
HDassert(oh);
@@ -2164,7 +2164,7 @@ done:
} /* H5O_alloc_msgs() */
-/*-------------------------------------------------------------------------
+/*-------------------------------------------------------------------------
*
* Function: H5O_move_msgs_forward
*
@@ -2186,7 +2186,7 @@ H5O_move_msgs_forward(H5F_t *f, H5O_t *oh, hid_t dxpl_id)
htri_t ret_value; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5O_move_msgs_forward)
-
+
/* check args */
HDassert(oh);
@@ -2234,14 +2234,14 @@ H5O_move_msgs_forward(H5F_t *f, H5O_t *oh, hid_t dxpl_id)
/* Mark messages dirty */
curr_msg->dirty = TRUE;
nonnull_msg->dirty = TRUE;
-
+
/* Set the flag to indicate that the null message
* was packed - if its not at the end its chunk,
* we'll move it again on the next pass.
*/
packed_msg = TRUE;
} /* end if */
-
+
/* Break out of loop */
break;
} /* end if */
@@ -2354,7 +2354,7 @@ done:
} /* H5O_move_msgs_forward() */
-/*-------------------------------------------------------------------------
+/*-------------------------------------------------------------------------
*
* Function: H5O_merge_null
*
@@ -2376,7 +2376,7 @@ H5O_merge_null(H5F_t *f, H5O_t *oh)
htri_t ret_value; /* Return value */
FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5O_merge_null)
-
+
/* check args */
HDassert(oh != NULL);
@@ -2459,7 +2459,7 @@ H5O_merge_null(H5F_t *f, H5O_t *oh)
} /* H5O_merge_null() */
-/*-------------------------------------------------------------------------
+/*-------------------------------------------------------------------------
*
* Function: H5O_remove_empty_chunks
*
@@ -2607,7 +2607,7 @@ done:
} /* H5O_remove_empty_chunks() */
-/*-------------------------------------------------------------------------
+/*-------------------------------------------------------------------------
*
* Function: H5O_condense_header
*
@@ -2670,15 +2670,15 @@ done:
} /* H5O_condense_header() */
-/*-------------------------------------------------------------------------
+/*-------------------------------------------------------------------------
*
* Function: H5O_alloc_extend_chunk
*
* Purpose: Attempt to extend a chunk that is allocated on disk.
*
- * If the extension is successful, and if the last message
- * of the chunk is the null message, then that message will
- * be extended with the chunk. Otherwise a new null message
+ * If the extension is successful, and if the last message
+ * of the chunk is the null message, then that message will
+ * be extended with the chunk. Otherwise a new null message
* is created.
*
* f is the file in which the chunk will be written. It is
@@ -2686,7 +2686,7 @@ done:
* this chunk.
*
* Return: TRUE: The chunk has been extended, and *msg_idx
- * contains the message index for null message
+ * contains the message index for null message
* which is large enough to hold size bytes.
*
* FALSE: The chunk cannot be extended, and *msg_idx
@@ -2699,10 +2699,10 @@ done:
*-------------------------------------------------------------------------
*/
static htri_t
-H5O_alloc_extend_chunk(H5F_t *f,
- H5O_t *oh,
- unsigned chunkno,
- size_t size,
+H5O_alloc_extend_chunk(H5F_t *f,
+ H5O_t *oh,
+ unsigned chunkno,
+ size_t size,
unsigned * msg_idx)
{
size_t delta; /* Change in chunk's size */
@@ -2747,7 +2747,7 @@ H5O_alloc_extend_chunk(H5F_t *f,
delta = H5O_ALIGN(delta);
/* determine whether the chunk can be extended */
- tri_result = H5MF_can_extend(f, H5FD_MEM_OHDR, oh->chunk[chunkno].addr,
+ tri_result = H5MF_can_extend(f, H5FD_MEM_OHDR, oh->chunk[chunkno].addr,
(hsize_t)(oh->chunk[chunkno].size), (hsize_t)delta);
if(tri_result == FALSE) { /* can't extend -- we are done */
HGOTO_DONE(FALSE);
@@ -2829,12 +2829,12 @@ done:
/*-------------------------------------------------------------------------
* Function: H5O_alloc_new_chunk
*
- * Purpose: Allocates a new chunk for the object header, including
+ * Purpose: Allocates a new chunk for the object header, including
* file space.
*
- * One of the other chunks will get an object continuation
- * message. If there isn't room in any other chunk for the
- * object continuation message, then some message from
+ * One of the other chunks will get an object continuation
+ * message. If there isn't room in any other chunk for the
+ * object continuation message, then some message from
* another chunk is moved into this chunk to make room.
*
* SIZE need not be aligned.
@@ -2853,9 +2853,9 @@ done:
*-------------------------------------------------------------------------
*/
static unsigned
-H5O_alloc_new_chunk(H5F_t *f,
- hid_t dxpl_id,
- H5O_t *oh,
+H5O_alloc_new_chunk(H5F_t *f,
+ hid_t dxpl_id,
+ H5O_t *oh,
size_t size)
{
size_t cont_size; /*continuation message size */
@@ -2926,9 +2926,9 @@ H5O_alloc_new_chunk(H5F_t *f,
* If we must move some other message to make room for the null
* message, then make sure the new chunk has enough room for that
* other message.
- *
+ *
* Move attributes first, then link messages, then other messages.
- *
+ *
*/
if(found_null < 0) {
if(found_attr >= 0)
@@ -3069,11 +3069,11 @@ done:
*-------------------------------------------------------------------------
*/
static unsigned
-H5O_alloc(H5F_t *f,
- hid_t dxpl_id,
- H5O_t *oh,
- const H5O_msg_class_t *type,
- size_t size,
+H5O_alloc(H5F_t *f,
+ hid_t dxpl_id,
+ H5O_t *oh,
+ const H5O_msg_class_t *type,
+ size_t size,
unsigned * oh_flags_ptr)
{
H5O_mesg_t *msg; /* Pointer to newly allocated message */
@@ -3112,7 +3112,7 @@ H5O_alloc(H5F_t *f,
/* check to see if we can extend one of the chunks. If we can,
* do so. Otherwise, we will have to allocate a new chunk.
*
- * Note that in this new version of this function, all chunks
+ * Note that in this new version of this function, all chunks
* must have file space allocated to them.
*/
for(chunkno = 0; chunkno < oh->nchunks; chunkno++) {
@@ -3122,14 +3122,14 @@ H5O_alloc(H5F_t *f,
tri_result = H5O_alloc_extend_chunk(f, oh, chunkno, size, &idx);
if(tri_result == TRUE)
- break;
+ break;
else if(tri_result == FALSE)
idx = UFAIL;
else
HGOTO_ERROR(H5E_OHDR, H5E_SYSTEM, UFAIL, "H5O_alloc_extend_chunk failed unexpectedly")
} /* end for */
- /* if idx is still UFAIL, we were not able to extend a chunk.
+ /* if idx is still UFAIL, we were not able to extend a chunk.
* Create a new one.
*/
if(idx == UFAIL)
@@ -3902,7 +3902,7 @@ done:
*
* Note: Same algorithm as H5O_obj_type_real()
*
- * Return: Success: An object class
+ * Return: Success: An object class
* Failure: NULL
*
* Programmer: Quincey Koziol
@@ -4019,21 +4019,21 @@ H5O_loc_copy(H5O_loc_t *dst, const H5O_loc_t *src, H5_copy_depth_t depth)
/*-------------------------------------------------------------------------
* Function: H5O_copy_mesg_file
- *
+ *
* Purpose: Copies a message to file. If MESG is is the null pointer then a null
* pointer is returned with no error.
*
* Return: Success: Ptr to the new message
*
* Failure: NULL
- *
- * Programmer: Peter Cao
- * June 4, 2005
- *
+ *
+ * Programmer: Peter Cao
+ * June 4, 2005
+ *
*-------------------------------------------------------------------------
*/
static void *
-H5O_copy_mesg_file(const H5O_msg_class_t *type, H5F_t *file_src, void *native_src,
+H5O_copy_mesg_file(const H5O_msg_class_t *type, H5F_t *file_src, void *native_src,
H5F_t *file_dst, hid_t dxpl_id, H5O_copy_t *cpy_info, void *udata)
{
void *ret_value;
@@ -4069,7 +4069,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5O_copy_header_real(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out */,
+H5O_copy_header_real(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out */,
hid_t dxpl_id, H5O_copy_t *cpy_info)
{
H5O_addr_map_t *addr_map = NULL; /* Address mapping of object copied */
@@ -4129,8 +4129,8 @@ H5O_copy_header_real(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out */,
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
/* need to allocate all the chunks for the destination before copy the chunk message
- because continuation chunk message will need to know the chunk address of address of
- continuation block.
+ because continuation chunk message will need to know the chunk address of address of
+ continuation block.
*/
for(chunkno = 0; chunkno < oh_src->nchunks; chunkno++) {
size_t chunk_size = oh_src->chunk[chunkno].size;
@@ -4138,7 +4138,7 @@ H5O_copy_header_real(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out */,
/* '0th' chunk is preceded by object header prefix */
if(0 == chunkno) {
/* Allocate file space for the first chunk & object header prefix */
- if(HADDR_UNDEF == (addr_new = H5MF_alloc(oloc_dst->file, H5FD_MEM_OHDR, dxpl_id, (hsize_t)hdr_size + chunk_size)))
+ if(HADDR_UNDEF == (addr_new = H5MF_alloc(oloc_dst->file, H5FD_MEM_OHDR, dxpl_id, (hsize_t)hdr_size + chunk_size)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "file allocation failed for object header")
/* Set first chunk's address */
@@ -4263,12 +4263,12 @@ H5O_copy_header_real(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out */,
/* Copy the source message */
if(H5O_CONT_ID == mesg_src->type->id) {
- if((mesg_dst->native = H5O_copy_mesg_file(copy_type, oloc_src->file, mesg_src->native,
+ if((mesg_dst->native = H5O_copy_mesg_file(copy_type, oloc_src->file, mesg_src->native,
oloc_dst->file, dxpl_id, cpy_info, oh_dst->chunk)) == NULL)
HGOTO_ERROR(H5E_OHDR, H5E_CANTCOPY, FAIL, "unable to copy object header message")
} /* end if */
else {
- if((mesg_dst->native = H5O_copy_mesg_file(copy_type, oloc_src->file, mesg_src->native,
+ if((mesg_dst->native = H5O_copy_mesg_file(copy_type, oloc_src->file, mesg_src->native,
oloc_dst->file, dxpl_id, cpy_info, udata)) == NULL)
HGOTO_ERROR(H5E_OHDR, H5E_CANTCOPY, FAIL, "unable to copy object header message")
} /* end else */
@@ -4301,8 +4301,8 @@ H5O_copy_header_real(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out */,
if(H5SL_insert(cpy_info->map_list, addr_map, &(addr_map->src_addr)) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTINSERT, FAIL, "can't insert object into skip list")
- /* "post copy" loop over messages, to fix up any messages which require a complete
- * object header for destination object
+ /* "post copy" loop over messages, to fix up any messages which require a complete
+ * object header for destination object
*/
for(mesgno = 0; mesgno < oh_src->nmesgs; mesgno++) {
/* Set up convenience variables */
@@ -4329,7 +4329,7 @@ H5O_copy_header_real(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out */,
LOAD_NATIVE(oloc_dst->file, dxpl_id, mesg_dst, FAIL)
/* Perform "post copy" operation on messge */
- if((copy_type->post_copy_file)(oloc_src, mesg_src->native, oloc_dst,
+ if((copy_type->post_copy_file)(oloc_src, mesg_src->native, oloc_dst,
mesg_dst->native, &modified, dxpl_id, cpy_info) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, FAIL, "unable to perform 'post copy' operation on message")
@@ -4391,7 +4391,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5O_copy_header_map(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out */,
+H5O_copy_header_map(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out */,
hid_t dxpl_id, H5O_copy_t *cpy_info, hbool_t inc_depth)
{
H5O_addr_map_t *addr_map; /* Address mapping of object copied */
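
The hunks above run through the object-header space-allocation path. Its header comments describe a two-step strategy: try to extend an existing chunk in place (absorbing the space into a trailing null message, or creating one), and only if no chunk can grow, allocate a new chunk, which in turn plants an object continuation message in one of the existing chunks. The following is an editor's sketch of that control flow, not part of the patch; it reuses the internal signatures shown in the hunks, and the function name with the _sketch suffix is hypothetical.

static unsigned
H5O_alloc_sketch(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t size)
{
    unsigned idx = UFAIL;       /* index of a null message large enough for `size' */
    unsigned chunkno;

    /* Try to extend each existing chunk; in this version of the code every
     * chunk already has file space allocated to it.
     */
    for(chunkno = 0; chunkno < oh->nchunks; chunkno++) {
        htri_t tri_result = H5O_alloc_extend_chunk(f, oh, chunkno, size, &idx);

        if(tri_result == TRUE)
            break;              /* extended: idx now names a usable null message */
        else if(tri_result == FALSE)
            idx = UFAIL;        /* this chunk cannot grow, try the next one */
        else
            return UFAIL;       /* unexpected failure from the extend attempt */
    } /* end for */

    /* No chunk could be extended, so create a brand-new chunk (which also
     * places a continuation message in one of the existing chunks).
     */
    if(idx == UFAIL)
        idx = H5O_alloc_new_chunk(f, dxpl_id, oh, size);

    return idx;
}
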
diff --git a/src/H5Oattr.c b/src/H5Oattr.c
index d13216b..f0c15e7 100644
--- a/src/H5Oattr.c
+++ b/src/H5Oattr.c
@@ -640,14 +640,14 @@ done:
* Function: H5O_attr_pre_copy_file
*
* Purpose: Perform any necessary actions before copying message between
- * files for attribute messages.
+ * files for attribute messages.
*
* Return: Success: Non-negative
*
* Failure: Negative
*
* Programmer: Quincey Koziol
- * Monday, June 26, 2006
+ * Monday, June 26, 2006
*
*-------------------------------------------------------------------------
*/
@@ -682,16 +682,16 @@ H5O_attr_pre_copy_file(H5F_t UNUSED *file_src, const H5O_msg_class_t UNUSED *typ
* Failure: NULL
*
* Programmer: Quincey Koziol
- * November 1, 2005
+ * November 1, 2005
*
- * Modifications: Peter Cao
+ * Modifications: Peter Cao
* December 17, 2005
* Datatype conversion for variable length datatype
*
*-------------------------------------------------------------------------
*/
static void *
-H5O_attr_copy_file(H5F_t UNUSED *file_src, void *native_src, H5F_t *file_dst,
+H5O_attr_copy_file(H5F_t UNUSED *file_src, void *native_src, H5F_t *file_dst,
hid_t dxpl_id, H5O_copy_t *cpy_info, void UNUSED *udata)
{
H5A_t *attr_src = (H5A_t *)native_src;
@@ -819,7 +819,7 @@ H5O_attr_copy_file(H5F_t UNUSED *file_src, void *native_src, H5F_t *file_dst,
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to convert between src and mem datatypes")
if(NULL == (tpath_mem_dst = H5T_path_find(dt_mem, attr_dst->dt, NULL, NULL, dxpl_id, FALSE)))
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to convert between mem and dst datatypes")
-
+
/* Determine largest datatype size */
if(0 == (src_dt_size = H5T_get_size(attr_src->dt)))
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to determine datatype size")
@@ -839,39 +839,39 @@ H5O_attr_copy_file(H5F_t UNUSED *file_src, void *native_src, H5F_t *file_dst,
/* Create dataspace for number of elements in buffer */
buf_dim = nelmts;
-
+
/* Create the space and set the initial extent */
if(NULL == (buf_space = H5S_create_simple((unsigned)1, &buf_dim, NULL)))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, NULL, "can't create simple dataspace")
-
+
/* Atomize */
if((buf_sid = H5I_register(H5I_DATASPACE, buf_space)) < 0) {
H5S_close(buf_space);
HGOTO_ERROR(H5E_ATOM, H5E_CANTREGISTER, NULL, "unable to register dataspace ID")
} /* end if */
-
+
/* Allocate memory for recclaim buf */
if(NULL == (reclaim_buf = H5FL_BLK_MALLOC(attr_buf, buf_size)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation NULLed for raw data chunk")
-
+
/* Allocate memory for copying the chunk */
if(NULL == (buf = H5FL_BLK_MALLOC(attr_buf, buf_size)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation NULLed for raw data chunk")
-
+
HDmemcpy(buf, attr_src->data, attr_src->data_size);
-
+
/* Convert from source file to memory */
if(H5T_convert(tpath_src_mem, tid_src, tid_mem, nelmts, (size_t)0, (size_t)0, buf, NULL, dxpl_id) < 0)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "datatype conversion NULLed")
-
+
HDmemcpy(reclaim_buf, buf, buf_size);
-
+
/* Convert from memory to destination file */
if(H5T_convert(tpath_mem_dst, tid_mem, tid_dst, nelmts, (size_t)0, (size_t)0, buf, NULL, dxpl_id) < 0)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "datatype conversion NULLed")
-
+
HDmemcpy(attr_dst->data, buf, attr_dst->data_size);
-
+
if(H5D_vlen_reclaim(tid_mem, buf_space, H5P_DATASET_XFER_DEFAULT, reclaim_buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_BADITER, NULL, "unable to reclaim variable-length data")
} /* type conversion */
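
The H5O_attr_copy_file hunk above converts the attribute's raw data in two hops when the source and destination files use different datatypes: source file type to a memory type, then memory type to destination file type, keeping a second copy of the memory-form data so that any variable-length elements can be reclaimed afterwards. Below is an editor's sketch of the same idea using only public calls (H5Tget_native_type, H5Tconvert, H5Dvlen_reclaim); the variables src_tid, dst_tid, nelmts, src_data and dst_data are assumptions for illustration, and error handling is omitted.

hid_t  mem_tid  = H5Tget_native_type(src_tid, H5T_DIR_ASCEND);
size_t elt_size = H5Tget_size(src_tid);             /* size the buffer for the largest form */
if(H5Tget_size(mem_tid) > elt_size) elt_size = H5Tget_size(mem_tid);
if(H5Tget_size(dst_tid) > elt_size) elt_size = H5Tget_size(dst_tid);
unsigned char *buf = malloc(nelmts * elt_size);

memcpy(buf, src_data, nelmts * H5Tget_size(src_tid));
H5Tconvert(src_tid, mem_tid, nelmts, buf, NULL, H5P_DEFAULT);   /* file (src) -> memory     */
/* keep a copy of the memory form here if the datatype contains vlen elements */
H5Tconvert(mem_tid, dst_tid, nelmts, buf, NULL, H5P_DEFAULT);   /* memory     -> file (dst) */
memcpy(dst_data, buf, nelmts * H5Tget_size(dst_tid));
/* a saved memory-form copy would be released with H5Dvlen_reclaim() at this point */
free(buf);
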
diff --git a/src/H5Ocont.c b/src/H5Ocont.c
index 6cb5735..13a9d47 100644
--- a/src/H5Ocont.c
+++ b/src/H5Ocont.c
@@ -41,7 +41,7 @@ static herr_t H5O_cont_encode(H5F_t *f, uint8_t *p, const void *_mesg);
static size_t H5O_cont_size(const H5F_t *f, const void *_mesg);
static herr_t H5O_cont_free(void *mesg);
static herr_t H5O_cont_delete(H5F_t *f, hid_t dxpl_id, const void *_mesg, hbool_t adj_link);
-static void *H5O_cont_copy_file(H5F_t *file_src, void *mesg_src, H5F_t *file_dst,
+static void *H5O_cont_copy_file(H5F_t *file_src, void *mesg_src, H5F_t *file_dst,
hid_t dxpl_id, H5O_copy_t *cpy_info, void *udata);
static herr_t H5O_cont_debug(H5F_t *f, hid_t dxpl_id, const void *_mesg, FILE * stream,
int indent, int fwidth);
@@ -253,13 +253,13 @@ done:
*
* Failure: NULL
*
- * Programmer: Peter Cao
- * September 22, 2005
+ * Programmer: Peter Cao
+ * September 22, 2005
*
*-------------------------------------------------------------------------
*/
static void *
-H5O_cont_copy_file(H5F_t UNUSED *file_src, void *mesg_src, H5F_t UNUSED *file_dst,
+H5O_cont_copy_file(H5F_t UNUSED *file_src, void *mesg_src, H5F_t UNUSED *file_dst,
hid_t UNUSED dxpl_id, H5O_copy_t UNUSED *cpy_info, void *udata)
{
H5O_cont_t *cont_src = (H5O_cont_t *) mesg_src;
diff --git a/src/H5Odtype.c b/src/H5Odtype.c
index 93470c5..b5b441a 100644
--- a/src/H5Odtype.c
+++ b/src/H5Odtype.c
@@ -610,7 +610,7 @@ H5O_dtype_encode_helper(uint8_t **pp, const H5T_t *dt)
case H5T_ORDER_BE:
flags |= 0x01;
break;
- case H5T_ORDER_VAX: /*turn on 1st and 6th (reserved before adding VAX) bits*/
+ case H5T_ORDER_VAX: /*turn on 1st and 6th (reserved before adding VAX) bits*/
flags |= 0x41;
has_vax = TRUE;
break;
@@ -1211,7 +1211,7 @@ H5O_dtype_set_share(H5F_t UNUSED *f, void *_mesg/*in,out*/,
* Failure: Negative
*
* Programmer: Quincey Koziol
- * November 21, 2005
+ * November 21, 2005
*
*-------------------------------------------------------------------------
*/
diff --git a/src/H5Oefl.c b/src/H5Oefl.c
index afa46f1..cd94d69 100644
--- a/src/H5Oefl.c
+++ b/src/H5Oefl.c
@@ -33,7 +33,7 @@ static herr_t H5O_efl_encode(H5F_t *f, uint8_t *p, const void *_mesg);
static void *H5O_efl_copy(const void *_mesg, void *_dest, unsigned update_flags);
static size_t H5O_efl_size(const H5F_t *f, const void *_mesg);
static herr_t H5O_efl_reset(void *_mesg);
-static void *H5O_efl_copy_file(H5F_t *file_src, void *mesg_src, H5F_t *file_dst,
+static void *H5O_efl_copy_file(H5F_t *file_src, void *mesg_src, H5F_t *file_dst,
hid_t dxpl_id, H5O_copy_t *cpy_info, void *udata);
static herr_t H5O_efl_debug(H5F_t *f, hid_t dxpl_id, const void *_mesg, FILE * stream,
int indent, int fwidth);
@@ -427,15 +427,15 @@ done:
*
* Failure: NULL
*
- * Programmer: Peter Cao
- * September 29, 2005
+ * Programmer: Peter Cao
+ * September 29, 2005
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
static void *
-H5O_efl_copy_file(H5F_t UNUSED *file_src, void *mesg_src, H5F_t *file_dst,
+H5O_efl_copy_file(H5F_t UNUSED *file_src, void *mesg_src, H5F_t *file_dst,
hid_t dxpl_id, H5O_copy_t UNUSED *cpy_info, void UNUSED *_udata)
{
H5O_efl_t *efl_src = (H5O_efl_t *) mesg_src;
diff --git a/src/H5Olayout.c b/src/H5Olayout.c
index b6008c8..bff1dfb 100644
--- a/src/H5Olayout.c
+++ b/src/H5Olayout.c
@@ -39,7 +39,7 @@ static size_t H5O_layout_size(const H5F_t *f, const void *_mesg);
static herr_t H5O_layout_reset(void *_mesg);
static herr_t H5O_layout_free(void *_mesg);
static herr_t H5O_layout_delete(H5F_t *f, hid_t dxpl_id, const void *_mesg, hbool_t adj_link);
-static void *H5O_layout_copy_file(H5F_t *file_src, void *mesg_src, H5F_t *file_dst,
+static void *H5O_layout_copy_file(H5F_t *file_src, void *mesg_src, H5F_t *file_dst,
hid_t dxpl_id, H5O_copy_t *cpy_info, void *udata);
static herr_t H5O_layout_debug(H5F_t *f, hid_t dxpl_id, const void *_mesg, FILE * stream,
int indent, int fwidth);
@@ -613,15 +613,15 @@ done:
*
* Failure: NULL
*
- * Programmer: Peter Cao
- * July 23, 2005
+ * Programmer: Peter Cao
+ * July 23, 2005
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
static void *
-H5O_layout_copy_file(H5F_t *file_src, void *mesg_src, H5F_t *file_dst, hid_t dxpl_id,
+H5O_layout_copy_file(H5F_t *file_src, void *mesg_src, H5F_t *file_dst, hid_t dxpl_id,
H5O_copy_t UNUSED *cpy_info, void *_udata)
{
H5D_copy_file_ud_t *udata = (H5D_copy_file_ud_t *)_udata; /* Dataset copying user data */
@@ -677,7 +677,7 @@ H5O_layout_copy_file(H5F_t *file_src, void *mesg_src, H5F_t *file_dst, hid_t dxp
layout_dst->u.chunk.addr = HADDR_UNDEF;
/* create chunked layout */
- if(H5D_istore_copy(file_src, layout_src, file_dst, layout_dst,
+ if(H5D_istore_copy(file_src, layout_src, file_dst, layout_dst,
udata->src_dtype, udata->src_pline, dxpl_id) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to copy chunked storage")
diff --git a/src/H5Olinfo.c b/src/H5Olinfo.c
index 1b84191..8017002 100644
--- a/src/H5Olinfo.c
+++ b/src/H5Olinfo.c
@@ -282,7 +282,7 @@ H5O_linfo_free(void *mesg)
*-------------------------------------------------------------------------
*/
static void *
-H5O_linfo_copy_file(H5F_t UNUSED *file_src, void *native_src, H5F_t UNUSED *file_dst,
+H5O_linfo_copy_file(H5F_t UNUSED *file_src, void *native_src, H5F_t UNUSED *file_dst,
hid_t UNUSED dxpl_id, H5O_copy_t *cpy_info, void UNUSED *udata)
{
H5O_linfo_t *linfo_src = (H5O_linfo_t *) native_src;
diff --git a/src/H5Olink.c b/src/H5Olink.c
index bc45326..c7c00ca 100644
--- a/src/H5Olink.c
+++ b/src/H5Olink.c
@@ -47,7 +47,7 @@ static herr_t H5O_link_pre_copy_file(H5F_t *file_src, const H5O_msg_class_t *typ
void *mesg_src, hbool_t *deleted, const H5O_copy_t *cpy_info, void *udata);
static void *H5O_link_copy_file(H5F_t *file_src, void *native_src,
H5F_t *file_dst, hid_t dxpl_id, H5O_copy_t *cpy_info, void *udata);
-static herr_t H5O_link_post_copy_file(const H5O_loc_t *parent_src_oloc, const void *mesg_src, H5O_loc_t *dst_oloc,
+static herr_t H5O_link_post_copy_file(const H5O_loc_t *parent_src_oloc, const void *mesg_src, H5O_loc_t *dst_oloc,
void *mesg_dst, hbool_t *modified, hid_t dxpl_id, H5O_copy_t *cpy_info);
static herr_t H5O_link_debug(H5F_t *f, hid_t dxpl_id, const void *_mesg,
FILE * stream, int indent, int fwidth);
@@ -468,14 +468,14 @@ done:
* Function: H5O_link_pre_copy_file
*
* Purpose: Perform any necessary actions before copying message between
- * files for link messages.
+ * files for link messages.
*
* Return: Success: Non-negative
*
* Failure: Negative
*
* Programmer: Quincey Koziol
- * Monday, June 26, 2006
+ * Monday, June 26, 2006
*
*-------------------------------------------------------------------------
*/
@@ -512,12 +512,12 @@ H5O_link_pre_copy_file(H5F_t UNUSED *file_src, const H5O_msg_class_t UNUSED *typ
* Failure: NULL
*
* Programmer: Quincey Koziol
- * November 7, 2005
+ * November 7, 2005
*
*-------------------------------------------------------------------------
*/
static void *
-H5O_link_copy_file(H5F_t UNUSED *file_src, void *native_src, H5F_t UNUSED *file_dst,
+H5O_link_copy_file(H5F_t UNUSED *file_src, void *native_src, H5F_t UNUSED *file_dst,
hid_t UNUSED dxpl_id, H5O_copy_t UNUSED *cpy_info, void UNUSED *udata)
{
H5O_link_t *link_src = (H5O_link_t *) native_src;
@@ -580,12 +580,12 @@ done:
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
- * November 7, 2005
+ * November 7, 2005
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5O_link_post_copy_file(const H5O_loc_t *parent_src_oloc, const void *mesg_src, H5O_loc_t *dst_oloc, void *mesg_dst,
+static herr_t
+H5O_link_post_copy_file(const H5O_loc_t *parent_src_oloc, const void *mesg_src, H5O_loc_t *dst_oloc, void *mesg_dst,
hbool_t *modified, hid_t dxpl_id, H5O_copy_t *cpy_info)
{
H5O_link_t *link_src = (H5O_link_t *)mesg_src; /* Casting away const OK... -QAK */
diff --git a/src/H5Opkg.h b/src/H5Opkg.h
index cdcfe0f..092de03 100644
--- a/src/H5Opkg.h
+++ b/src/H5Opkg.h
@@ -164,7 +164,7 @@ H5_DLLVAR const H5O_msg_class_t H5O_MSG_DTYPE[1];
H5_DLLVAR const H5O_msg_class_t H5O_MSG_FILL[1];
/* New Fill Value Message. (0x0005) */
-/*
+/*
* The new fill value message is fill value plus
* space allocation time and fill value writing time and whether fill
* value is defined.
@@ -208,7 +208,7 @@ H5_DLLVAR const H5O_msg_class_t H5O_MSG_NAME[1];
H5_DLLVAR const H5O_msg_class_t H5O_MSG_MTIME[1];
/* Shared Object Message. (0x000f) */
-/*
+/*
* This message ID never really appears in an object
* header. Instead, bit 2 of the `Flags' field will be set and the ID field
* will be the ID of the pointed-to message.
@@ -222,7 +222,7 @@ H5_DLLVAR const H5O_msg_class_t H5O_MSG_CONT[1];
H5_DLLVAR const H5O_msg_class_t H5O_MSG_STAB[1];
/* New Modification Time Message. (0x0012) */
-/*
+/*
* The message is just a `time_t'.
*/
H5_DLLVAR const H5O_msg_class_t H5O_MSG_MTIME_NEW[1];
diff --git a/src/H5Opline.c b/src/H5Opline.c
index 9658e07..29cd0fd 100644
--- a/src/H5Opline.c
+++ b/src/H5Opline.c
@@ -435,8 +435,8 @@ H5O_pline_free (void *mesg)
*
* Failure: Negative
*
- * Programmer: Peter Cao
- * December 27, 2005
+ * Programmer: Peter Cao
+ * December 27, 2005
*
*-------------------------------------------------------------------------
*/
diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h
index ee3f5da..1c4108a 100644
--- a/src/H5Oprivate.h
+++ b/src/H5Oprivate.h
@@ -72,7 +72,7 @@ typedef struct H5O_copy_t {
hbool_t expand_soft_link; /* Flag to expand soft links */
hbool_t expand_ext_link; /* Flag to expand external links */
hbool_t expand_obj_ref; /* Flag to expand object references */
- hbool_t copy_without_attr; /* Flag to not copy attributes */
+ hbool_t copy_without_attr; /* Flag to not copy attributes */
int curr_depth; /* Current depth in hierarchy copied */
int max_depth; /* Maximum depth in hierarchy to copy */
H5SL_t *map_list; /* Skip list to hold address mappings */
@@ -357,9 +357,9 @@ H5_DLL herr_t H5O_get_info(H5O_loc_t *loc, H5O_stat_t *ostat, hid_t dxpl_id);
H5_DLL herr_t H5O_iterate(const H5O_loc_t *loc, unsigned type_id, H5O_operator_t op,
void *op_data, hid_t dxpl_id);
H5_DLL H5G_obj_t H5O_obj_type(H5O_loc_t *loc, hid_t dxpl_id);
-H5_DLL herr_t H5O_copy_header(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out */,
+H5_DLL herr_t H5O_copy_header(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out */,
hid_t dxpl_id, unsigned cpy_option);
-H5_DLL herr_t H5O_copy_header_map(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out */,
+H5_DLL herr_t H5O_copy_header_map(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out */,
hid_t dxpl_id, H5O_copy_t *cpy_info, hbool_t inc_depth);
H5_DLL herr_t H5O_debug_id(unsigned type_id, H5F_t *f, hid_t dxpl_id, const void *mesg, FILE *stream, int indent, int fwidth);
H5_DLL herr_t H5O_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream, int indent,
diff --git a/src/H5Oshared.c b/src/H5Oshared.c
index dc5ea6f..226f300 100644
--- a/src/H5Oshared.c
+++ b/src/H5Oshared.c
@@ -423,12 +423,12 @@ done:
* Failure: NULL
*
* Programmer: Quincey Koziol
- * November 1, 2005
+ * November 1, 2005
*
*-------------------------------------------------------------------------
*/
static void *
-H5O_shared_copy_file(H5F_t UNUSED *file_src, void *native_src, H5F_t *file_dst,
+H5O_shared_copy_file(H5F_t UNUSED *file_src, void *native_src, H5F_t *file_dst,
hid_t dxpl_id, H5O_copy_t *cpy_info, void UNUSED *udata)
{
H5O_shared_t *shared_src = (H5O_shared_t *)native_src;
@@ -470,14 +470,14 @@ done:
* Function: H5O_shared_pre_copy_file
*
* Purpose: Perform any necessary actions before copying message between
- * files for shared messages.
+ * files for shared messages.
*
* Return: Success: Non-negative
*
* Failure: Negative
*
- * Programmer: Peter Cao
- * Saturday, February 11, 2006
+ * Programmer: Peter Cao
+ * Saturday, February 11, 2006
*
*-------------------------------------------------------------------------
*/
@@ -487,7 +487,7 @@ H5O_shared_pre_copy_file(H5F_t *file_src, const H5O_msg_class_t *type,
void *udata)
{
H5O_shared_t *shared_src = (H5O_shared_t *)native_src;
- void *mesg_native = NULL;
+ void *mesg_native = NULL;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5O_shared_pre_copy_file)
diff --git a/src/H5Ostab.c b/src/H5Ostab.c
index 7516afd..d641bc5 100644
--- a/src/H5Ostab.c
+++ b/src/H5Ostab.c
@@ -45,7 +45,7 @@ static herr_t H5O_stab_free(void *_mesg);
static herr_t H5O_stab_delete(H5F_t *f, hid_t dxpl_id, const void *_mesg, hbool_t adj_link);
static void *H5O_stab_copy_file(H5F_t *file_src, void *native_src,
H5F_t *file_dst, hid_t dxpl_id, H5O_copy_t *cpy_info, void *udata);
-static herr_t H5O_stab_post_copy_file(const H5O_loc_t *parent_src_oloc, const void *mesg_src, H5O_loc_t *dst_oloc,
+static herr_t H5O_stab_post_copy_file(const H5O_loc_t *parent_src_oloc, const void *mesg_src, H5O_loc_t *dst_oloc,
void *mesg_dst, hbool_t *modified, hid_t dxpl_id, H5O_copy_t *cpy_info);
static herr_t H5O_stab_debug(H5F_t *f, hid_t dxpl_id, const void *_mesg,
FILE * stream, int indent, int fwidth);
@@ -305,13 +305,13 @@ done:
*
* Failure: NULL
*
- * Programmer: Peter Cao
- * September 10, 2005
+ * Programmer: Peter Cao
+ * September 10, 2005
*
*-------------------------------------------------------------------------
*/
static void *
-H5O_stab_copy_file(H5F_t *file_src, void *native_src, H5F_t *file_dst,
+H5O_stab_copy_file(H5F_t *file_src, void *native_src, H5F_t *file_dst,
hid_t dxpl_id, H5O_copy_t UNUSED *cpy_info, void UNUSED *udata)
{
H5O_stab_t *stab_src = (H5O_stab_t *) native_src;
@@ -356,19 +356,19 @@ done:
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Peter Cao
- * September 28, 2005
+ * Programmer: Peter Cao
+ * September 28, 2005
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5O_stab_post_copy_file(const H5O_loc_t *parent_src_oloc, const void *mesg_src, H5O_loc_t *dst_oloc,
+static herr_t
+H5O_stab_post_copy_file(const H5O_loc_t *parent_src_oloc, const void *mesg_src, H5O_loc_t *dst_oloc,
void *mesg_dst, hbool_t UNUSED *modified, hid_t dxpl_id, H5O_copy_t *cpy_info)
{
H5G_bt_it_ud5_t udata; /* B-tree user data */
const H5O_stab_t *stab_src = (const H5O_stab_t *)mesg_src;
H5O_stab_t *stab_dst = (H5O_stab_t *)mesg_dst;
- H5F_t *file_src = parent_src_oloc->file;
+ H5F_t *file_src = parent_src_oloc->file;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5O_stab_post_copy_file)
diff --git a/src/H5Pocpl.c b/src/H5Pocpl.c
index b00d314..7002b9d 100755
--- a/src/H5Pocpl.c
+++ b/src/H5Pocpl.c
@@ -37,14 +37,14 @@
* unsigned crt_intmd_group; IN: Flag to create intermediate group
* positive value -- to create intermediate group
* otherwise -- do not create intermediate group
- * For example, H5Pset_create_intermediate_group(plist_id, 1) to create intermediate group;
+ * For example, H5Pset_create_intermediate_group(plist_id, 1) to create intermediate group;
*
* Note: XXX: This property should really be an access property. -QAK
* XXX: The property is used only at creation time. It should
* be a creation property. However, the property is not
* saved with the group. In that sense, it should be access
* property. We do not have a good solution for this kind
- * of property. For now, it is used as a creation property.
+ * of property. For now, it is used as a creation property.
* -PXC
*
* Return: Non-negative on success/Negative on failure
@@ -119,18 +119,18 @@ done:
* from one location to another
*
* Usage: H5Pset_copy_group(plist_id, cpy_option)
- * hid_t plist_id; IN: Property list to copy object
+ * hid_t plist_id; IN: Property list to copy object
* unsigned cpy_option; IN: Options to copy object such as
* H5G_COPY_SHALLOW_HIERARCHY_FLAG -- Copy only immediate members
* H5G_COPY_EXPAND_SOFT_LINK_FLAG -- Expand soft links into new objects/
- * H5G_COPY_EXPAND_EXT_LINK_FLAG -- Expand external links into new objects
+ * H5G_COPY_EXPAND_EXT_LINK_FLAG -- Expand external links into new objects
* H5G_COPY_EXPAND_OBJ_REFERENCE_FLAG -- Copy objects that are pointed by references
* H5G_COPY_WITHOUT_ATTR_FLAG -- Copy object without copying attributes
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Peter Cao
- * March 13, 2006
+ * March 13, 2006
*-------------------------------------------------------------------------
*/
herr_t
@@ -163,12 +163,12 @@ done:
* Function: H5Pget_copy_object
*
* Purpose: Returns the cpy_option, which is set for H5Gcopy(hid_t loc_id,
- * const char* name, ... ) for copying objects
+ * const char* name, ... ) for copying objects
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Peter Cao
- * March 13, 2006
+ * March 13, 2006
*-------------------------------------------------------------------------
*/
herr_t
diff --git a/src/H5Shyper.c b/src/H5Shyper.c
index 83bebe9..f1b3f37 100644
--- a/src/H5Shyper.c
+++ b/src/H5Shyper.c
@@ -5350,7 +5350,7 @@ done:
RETURNS
>=0 on success, <0 on failure
DESCRIPTION
- Examine the span tree for a hyperslab selection and rebuild
+ Examine the span tree for a hyperslab selection and rebuild
the start/stride/count/block information for the selection, if possible.
GLOBAL VARIABLES
@@ -5374,7 +5374,7 @@ H5S_hyper_rebuild_helper(const H5S_hyper_span_t *span, H5S_hyper_dim_t span_slab
unsigned u;
H5S_hyper_dim_t canon_down_span_slab_info[H5S_MAX_RANK];
hbool_t ret_value = TRUE;
-
+
FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_hyper_rebuild_helper)
if(span) {
@@ -5431,7 +5431,7 @@ H5S_hyper_rebuild_helper(const H5S_hyper_span_t *span, H5S_hyper_dim_t span_slab
} /* end if */
/* Obtain values for stride and block */
- next_stride = span->low - curr_low;
+ next_stride = span->low - curr_low;
next_block = (span->high - span->low) + 1;
/* Compare stride and block in this span, to compare stride,
@@ -5478,7 +5478,7 @@ done:
RETURNS
>=0 on success, <0 on failure
DESCRIPTION
- Examine the span tree for a hyperslab selection and rebuild
+ Examine the span tree for a hyperslab selection and rebuild
the start/stride/count/block information for the selection, if possible.
GLOBAL VARIABLES
@@ -5490,7 +5490,7 @@ done:
REVISION LOG
This routine is the optimization of the old version. The previous version
- can only detect a singluar selection. This version is general enough to
+ can only detect a singluar selection. This version is general enough to
detect any regular selection.
KY, 2005/9/22
--------------------------------------------------------------------------*/
@@ -5520,7 +5520,7 @@ H5S_hyper_rebuild(H5S_t *space)
diminfo=space->select.sel_info.hslab->opt_diminfo;
app_diminfo=space->select.sel_info.hslab->app_diminfo;
- for(curr_dim = 0; curr_dim < rank; curr_dim++) {
+ for(curr_dim = 0; curr_dim < rank; curr_dim++) {
app_diminfo[(rank - curr_dim) - 1].start = diminfo[(rank - curr_dim) - 1].start = top_span_slab_info[curr_dim].start;
app_diminfo[(rank - curr_dim) - 1].stride = diminfo[(rank - curr_dim) - 1].stride = top_span_slab_info[curr_dim].stride;
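
The rebuild code above recovers a regular start/stride/count/block description from a span tree when one exists: within a dimension, the gap between consecutive spans gives the stride (next_stride = span->low - curr_low) and the width of each span gives the block (next_block = span->high - span->low + 1); the selection is regular only if both stay constant across the dimension. An editor's sketch of that arithmetic for one dimension, with a hypothetical span struct, not part of the patch:

struct span_sketch { hsize_t low, high; struct span_sketch *next; };

static int
rebuild_dim_sketch(const struct span_sketch *s, hsize_t *start, hsize_t *stride,
    hsize_t *count, hsize_t *block)
{
    hsize_t curr_low = s->low;

    *start  = s->low;
    *block  = (s->high - s->low) + 1;
    *stride = 1;                        /* placeholder until a second span is seen */
    *count  = 1;

    for(s = s->next; s; s = s->next) {
        hsize_t next_stride = s->low - curr_low;
        hsize_t next_block  = (s->high - s->low) + 1;

        if(*count == 1)
            *stride = next_stride;      /* the first gap defines the stride */
        else if(next_stride != *stride)
            return 0;                   /* unequal gaps: irregular selection */
        if(next_block != *block)
            return 0;                   /* unequal widths: irregular selection */

        curr_low = s->low;
        (*count)++;
    }
    return 1;                           /* regular: start/stride/count/block are valid */
}

For example, spans [0,1], [4,5], [8,9] rebuild to start=0, stride=4, count=3, block=2, while spans with unequal gaps or widths are rejected as irregular.
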
diff --git a/src/H5Stest.c b/src/H5Stest.c
index 5a1d2b0..0e28b3f 100644
--- a/src/H5Stest.c
+++ b/src/H5Stest.c
@@ -98,12 +98,12 @@ H5S_get_rebuild_status_test(hid_t space_id)
FUNC_ENTER_NOAPI(H5S_get_rebuild_status_test, FAIL)
- /* Get dataspace structures */
- if(NULL == (space = H5I_object_verify(space_id, H5I_DATASPACE)))
+ /* Get dataspace structures */
+ if(NULL == (space = H5I_object_verify(space_id, H5I_DATASPACE)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataspace")
ret_value = space->select.sel_info.hslab->diminfo_valid;
-
+
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_get_rebuild_status_test() */
diff --git a/src/H5T.c b/src/H5T.c
index b955b40..045c42d 100644
--- a/src/H5T.c
+++ b/src/H5T.c
@@ -1289,7 +1289,7 @@ H5T_init_interface(void)
#if H5T_CONV_INTERNAL_FP_ULLONG
status |= H5T_register(H5T_PERS_HARD, "flt_ullong", native_float, native_ullong, H5T_conv_float_ullong, H5AC_dxpl_id, FALSE);
status |= H5T_register(H5T_PERS_HARD, "dbl_ullong", native_double, native_ullong, H5T_conv_double_ullong, H5AC_dxpl_id, FALSE);
-#endif /* H5T_CONV_INTERNAL_FP_ULLONG */
+#endif /* H5T_CONV_INTERNAL_FP_ULLONG */
#if H5T_CONV_INTERNAL_LDOUBLE_ULLONG
status |= H5T_register(H5T_PERS_HARD, "ldbl_ullong", native_ldouble, native_ullong, H5T_conv_ldouble_ullong, H5AC_dxpl_id, FALSE);
#endif /* H5T_CONV_INTERNAL_LDOUBLE_ULLONG */
@@ -2629,11 +2629,11 @@ done:
* Function: H5Tcompiler_conv
*
* Purpose: Finds out whether the library's conversion function from
- * type src_id to type dst_id is a compiler (hard) conversion.
- * A hard conversion uses compiler's casting; a soft conversion
+ * type src_id to type dst_id is a compiler (hard) conversion.
+ * A hard conversion uses compiler's casting; a soft conversion
* uses the library's own conversion function.
*
- * Return: TRUE: hard conversion.
+ * Return: TRUE: hard conversion.
* FALSE: soft conversion.
* FAIL: failed.
*
@@ -4193,11 +4193,11 @@ done:
*
* Modifications:
* Added a parameter IS_API to indicate whether to an API
- * function issued a call to this function. If a API
- * function like H5Tregister() is calling this function to
- * register a new hard conversion function, IS_API is TRUE
- * and the old path is replaced. If a private function like
- * H5T_init_interface() is trying to register hard conversions,
+ * function issued a call to this function. If a API
+ * function like H5Tregister() is calling this function to
+ * register a new hard conversion function, IS_API is TRUE
+ * and the old path is replaced. If a private function like
+ * H5T_init_interface() is trying to register hard conversions,
* IS_API is FALSE and the old hard path is not replaced.
* Tuesday, Sept 13, 2005
*
@@ -4288,9 +4288,9 @@ H5T_path_find(const H5T_t *src, const H5T_t *dst, const char *name,
old_npaths=H5T_g.npaths;
/*
- * If we didn't find the path, if the caller is an API function specifying
- * a new hard conversion function, or if the caller is a private function
- * specifying a new hard conversion and the path is a soft conversion, then
+ * If we didn't find the path, if the caller is an API function specifying
+ * a new hard conversion function, or if the caller is a private function
+ * specifying a new hard conversion and the path is a soft conversion, then
* create a new path and add the new function to the path.
*/
if (!table || (table && func && is_api) || (table && !table->is_hard && func && !is_api)) {
@@ -4311,8 +4311,8 @@ H5T_path_find(const H5T_t *src, const H5T_t *dst, const char *name,
/*
* If a hard conversion function is specified and none is defined for the
- * path, or the caller is an API function, or the caller is a private function but
- * the existing path is a soft function, then add the new conversion to the path
+ * path, or the caller is an API function, or the caller is a private function but
+ * the existing path is a soft function, then add the new conversion to the path
* and initialize its conversion data.
*/
if (func && (!table || (table && is_api) || (table && !table->is_hard && !is_api))) {
@@ -4504,11 +4504,11 @@ H5T_path_bkg(const H5T_path_t *p)
/*-------------------------------------------------------------------------
* Function: H5T_compiler_conv
*
- * Purpose: Private function for H5Tcompiler_conv. Finds out whether the
- * library's conversion function from type SRC to type DST
+ * Purpose: Private function for H5Tcompiler_conv. Finds out whether the
+ * library's conversion function from type SRC to type DST
* is a hard conversion.
*
- * Return: TRUE: hard conversion.
+ * Return: TRUE: hard conversion.
* FALSE: soft conversion.
* FAIL: function failed.
*
diff --git a/src/H5Tconv.c b/src/H5Tconv.c
index dc56c30..1cb229a 100644
--- a/src/H5Tconv.c
+++ b/src/H5Tconv.c
@@ -551,9 +551,9 @@ H5FL_BLK_DEFINE_STATIC(array_seq);
/* Quincey added the condition branch (else if (*((ST*)S) != (ST)((DT)(*((ST*)S))))).
* It handles a special situation when the source is "float" and assigned the value
- * of "INT_MAX". Compilers do roundup making this value "INT_MAX+1". This branch
+ * of "INT_MAX". Compilers do roundup making this value "INT_MAX+1". This branch
* is to check that situation and return exception for some compilers, mainly GCC.
- * The branch if (*((ST*)S) > (DT)(D_MAX) || (sprec < dprec && *((ST*)S) ==
+ * The branch if (*((ST*)S) > (DT)(D_MAX) || (sprec < dprec && *((ST*)S) ==
* (DT)(D_MAX))) is for some compilers like Sun, HP, IBM, and SGI where under
* the same situation the "int" doesn't overflow. SLU - 2005/9/12
*/
@@ -3697,12 +3697,12 @@ H5T_conv_f_f (hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, size_t nelmts,
for (i = 0; i < tsize; i += 4) {
tmp1 = s[i];
tmp2 = s[i+1];
-
+
s[i] = s[(tsize-2)-i];
s[i+1] = s[(tsize-1)-i];
-
+
s[(tsize-2)-i] = tmp1;
- s[(tsize-1)-i] = tmp2;
+ s[(tsize-1)-i] = tmp2;
}
}
@@ -3790,7 +3790,7 @@ H5T_conv_f_f (hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, size_t nelmts,
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCONVERT, FAIL, "can't handle conversion exception")
goto padding;
-#ifdef H5_VMS
+#ifdef H5_VMS
} /*Temporary solution to handle VAX special values*/
#else /*H5_VMS*/
} else if (H5T_bit_find (s, src.u.f.epos, src.u.f.esize,
@@ -4054,12 +4054,12 @@ H5T_conv_f_f (hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, size_t nelmts,
for (i = 0; i < tsize; i += 4) {
tmp1 = d[i];
tmp2 = d[i+1];
-
+
d[i] = d[(tsize-2)-i];
d[i+1] = d[(tsize-1)-i];
-
+
d[(tsize-2)-i] = tmp1;
- d[(tsize-1)-i] = tmp2;
+ d[(tsize-1)-i] = tmp2;
}
}
@@ -9415,7 +9415,7 @@ H5T_conv_ldouble_llong (hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata,
done:
FUNC_LEAVE_NOAPI(ret_value);
}
-#endif /*H5T_CONV_INTERNAL_LDOUBLE_LLONG*/
+#endif /*H5T_CONV_INTERNAL_LDOUBLE_LLONG*/
/*-------------------------------------------------------------------------
@@ -9433,7 +9433,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-#if H5T_CONV_INTERNAL_LDOUBLE_ULLONG
+#if H5T_CONV_INTERNAL_LDOUBLE_ULLONG
herr_t
H5T_conv_ldouble_ullong (hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata,
size_t nelmts, size_t buf_stride,
@@ -9628,12 +9628,12 @@ H5T_conv_f_i (hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, size_t nelmts,
for (i = 0; i < tsize; i += 4) {
tmp1 = s[i];
tmp2 = s[i+1];
-
+
s[i] = s[(tsize-2)-i];
s[i+1] = s[(tsize-1)-i];
-
+
s[(tsize-2)-i] = tmp1;
- s[(tsize-1)-i] = tmp2;
+ s[(tsize-1)-i] = tmp2;
}
}
@@ -10058,7 +10058,7 @@ done:
* Wednesday, April 21, 2004
* There is a new design for exception handling like overflow,
* which is passed in as a transfer property.
- *
+ *
* Raymond Lu
* Monday, March 13, 2006
* Added support for VAX floating-point types.
@@ -10083,7 +10083,7 @@ H5T_conv_i_f (hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, size_t nelmts,
uint8_t *src_rev=NULL; /*order-reversed source buffer */
uint8_t dbuf[64]; /*temp destination buffer */
uint8_t tmp1, tmp2; /*temp variables for swapping bytes*/
-
+
/* Conversion-related variables */
hsize_t expo; /*destination exponent */
hsize_t expo_max; /*maximal possible exponent value */
@@ -10428,12 +10428,12 @@ H5T_conv_i_f (hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, size_t nelmts,
for (i = 0; i < tsize; i += 4) {
tmp1 = d[i];
tmp2 = d[i+1];
-
+
d[i] = d[(tsize-2)-i];
d[i+1] = d[(tsize-1)-i];
-
+
d[(tsize-2)-i] = tmp1;
- d[(tsize-1)-i] = tmp2;
+ d[(tsize-1)-i] = tmp2;
}
}
@@ -10473,7 +10473,7 @@ done:
* Function: H5T_reverse_order
*
* Purpose: Internal assisting function to reverse the order of
- * a sequence of byte when it's big endian or VAX order.
+ * a sequence of byte when it's big endian or VAX order.
* The byte sequence simulates the endian order.
*
* Return: Success: A pointer to the reversed byte sequence
@@ -10512,6 +10512,6 @@ H5T_reverse_order(uint8_t *rev, uint8_t *s, size_t size, H5T_order_t order)
for (i=0; i<size; i++)
rev[i] = s[i];
}
-
+
FUNC_LEAVE_NOAPI(SUCCEED);
}
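
Several hunks in this file sit inside the VAX byte-order handling, where the loops swap two bytes at a time from opposite ends of the value. An editor's sketch of what those loops do, not part of the patch: the order of the 16-bit words is reversed while the two bytes inside each word stay where they are, which is how the VAX (middle-endian) layout differs from a plain little-endian one.

static void
vax_word_swap_sketch(unsigned char *s, size_t tsize)    /* tsize = datatype size in bytes */
{
    size_t i;

    for(i = 0; i < tsize; i += 4) {
        unsigned char tmp1 = s[i];
        unsigned char tmp2 = s[i + 1];

        s[i]     = s[(tsize - 2) - i];  /* word i/2 trades places with word (tsize/2 - 1) - i/2 */
        s[i + 1] = s[(tsize - 1) - i];

        s[(tsize - 2) - i] = tmp1;
        s[(tsize - 1) - i] = tmp2;
    }
}

For an 8-byte value the words end up as w3 w2 w1 w0; for a 4-byte value, w1 w0.
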
diff --git a/src/H5Tpkg.h b/src/H5Tpkg.h
index ee0939e..d9c519d 100644
--- a/src/H5Tpkg.h
+++ b/src/H5Tpkg.h
@@ -80,14 +80,14 @@
#define H5T_CONV_INTERNAL_FP_LDOUBLE 1
#endif /*H5_SIZEOF_LONG_DOUBLE && H5_CONVERT_DENORMAL_FLOAT*/
-/* Define an internal macro for converting all integers to long double. SGI compilers give some
+/* Define an internal macro for converting all integers to long double. SGI compilers give some
* incorrect conversions. */
#if (H5_WANT_DATA_ACCURACY && H5_INTEGER_TO_LDOUBLE_ACCURATE) || (!H5_WANT_DATA_ACCURACY)
#define H5T_CONV_INTERNAL_INTEGER_LDOUBLE 1
#endif
/* Define an internal macro for converting unsigned (long) long to floating numbers.
- * 64-bit Solaris does different rounding. */
+ * 64-bit Solaris does different rounding. */
#if (H5_WANT_DATA_ACCURACY && H5_ULONG_TO_FP_BOTTOM_BIT_ACCURATE) || (!H5_WANT_DATA_ACCURACY)
#define H5T_CONV_INTERNAL_ULONG_FP 1
#endif
@@ -106,7 +106,7 @@
#define H5T_CONV_INTERNAL_LLONG_LDOUBLE 1
#endif
-/* Define an internal macro for converting unsigned long long to floating numbers. SGI compilers give
+/* Define an internal macro for converting unsigned long long to floating numbers. SGI compilers give
* some incorect conversion. 64-bit Solaris does different rounding. Windows Visual Studio 6 does
* not support unsigned long long. */
#if (H5_WANT_DATA_ACCURACY && H5_ULLONG_TO_FP_CAST_WORKS && H5_ULONG_TO_FP_BOTTOM_BIT_ACCURATE) || \
@@ -114,10 +114,10 @@
#define H5T_CONV_INTERNAL_ULLONG_FP 1
#endif
-/* Define an internal macro for converting unsigned long long to long double. SGI compilers give
+/* Define an internal macro for converting unsigned long long to long double. SGI compilers give
* some incorect conversion. 64-bit Solaris does different rounding. Windows Visual Studio 6 does
- * not support unsigned long long. For FreeBSD(sleipnir), the last 2 bytes of mantissa are lost when
- * compiler tries to do the conversion. For Cygwin, compiler doesn't do rounding correctly.
+ * not support unsigned long long. For FreeBSD(sleipnir), the last 2 bytes of mantissa are lost when
+ * compiler tries to do the conversion. For Cygwin, compiler doesn't do rounding correctly.
* Mac OS 10.4 gives some incorrect result. */
#if (H5_WANT_DATA_ACCURACY && H5_ULLONG_TO_FP_CAST_WORKS && H5_ULONG_TO_FP_BOTTOM_BIT_ACCURATE && \
H5_ULLONG_TO_LDOUBLE_PRECISION && H5_LLONG_TO_LDOUBLE_CORRECT) || (!H5_WANT_DATA_ACCURACY && \
@@ -140,14 +140,14 @@
#define H5T_CONV_INTERNAL_LDOUBLE_UINT 1
#endif
-/* Define an internal macro for converting floating numbers to long long. The hard conversion on Windows
+/* Define an internal macro for converting floating numbers to long long. The hard conversion on Windows
* .NET 2003 has a bug and gives wrong exception value. */
#if (H5_WANT_DATA_ACCURACY && !H5_HW_FP_TO_LLONG_NOT_WORKS) || (!H5_WANT_DATA_ACCURACY)
#define H5T_CONV_INTERNAL_FP_LLONG 1
#endif
/* Define an internal macro for converting long double to long long. SGI compilers give some incorrect
- * conversions. Mac OS 10.4 gives incorrect conversions. HP-UX 11.00 compiler generates floating exception.
+ * conversions. Mac OS 10.4 gives incorrect conversions. HP-UX 11.00 compiler generates floating exception.
* The hard conversion on Windows .NET 2003 has a bug and gives wrong exception value. */
#if (H5_WANT_DATA_ACCURACY && !H5_HW_FP_TO_LLONG_NOT_WORKS && H5_LDOUBLE_TO_INTEGER_ACCURATE && \
H5_LDOUBLE_TO_INTEGER_WORKS && H5_LDOUBLE_TO_LLONG_ACCURATE) || \
@@ -155,8 +155,8 @@
#define H5T_CONV_INTERNAL_LDOUBLE_LLONG 1
#endif
-/* Define an internal macro for converting floating numbers to unsigned long long. PGI compiler does
- * roundup when the source fraction part is greater than 0.5. HP-UX compilers set the maximal number
+/* Define an internal macro for converting floating numbers to unsigned long long. PGI compiler does
+ * roundup when the source fraction part is greater than 0.5. HP-UX compilers set the maximal number
* for unsigned long long as 0x7fffffffffffffff during conversion. */
#if (H5_WANT_DATA_ACCURACY && H5_FP_TO_ULLONG_ACCURATE && H5_FP_TO_ULLONG_RIGHT_MAXIMUM) || \
(!H5_WANT_DATA_ACCURACY)
@@ -165,8 +165,8 @@
#define H5T_CONV_INTERNAL_FP_ULLONG 0
#endif
-/* Define an internal macro for converting long double to unsigned long long. SGI compilers give some
- * incorrect conversions. Mac OS 10.4 gives incorrect conversions. HP-UX 11.00 compiler generates
+/* Define an internal macro for converting long double to unsigned long long. SGI compilers give some
+ * incorrect conversions. Mac OS 10.4 gives incorrect conversions. HP-UX 11.00 compiler generates
* floating exception. */
#if (H5_WANT_DATA_ACCURACY && H5_LDOUBLE_TO_INTEGER_ACCURATE && H5_LDOUBLE_TO_INTEGER_WORKS && \
H5_FP_TO_ULLONG_ACCURATE && H5_FP_TO_ULLONG_RIGHT_MAXIMUM && H5_LDOUBLE_TO_LLONG_ACCURATE) || \
diff --git a/src/H5Vprivate.h b/src/H5Vprivate.h
index 7e2473a..5848a57 100644
--- a/src/H5Vprivate.h
+++ b/src/H5Vprivate.h
@@ -303,7 +303,7 @@ H5V_vector_inc(int n, hsize_t *v1, const hsize_t *v2)
}
/* Lookup table for general log2(n) routine */
-static const char LogTable256[] =
+static const char LogTable256[] =
{
0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
@@ -352,12 +352,12 @@ H5V_log2_gen(hsize_t n)
if((ttt = (unsigned)(n >> 32)))
if((tt = (unsigned)(n >> 48)))
r = (t = (unsigned)(n >> 56)) ? 56 + LogTable256[t] : 48 + LogTable256[tt & 0xFF];
- else
+ else
r = (t = (unsigned)(n >> 40)) ? 40 + LogTable256[t] : 32 + LogTable256[ttt & 0xFF];
else
if((tt = (unsigned)(n >> 16)))
r = (t = (unsigned)(n >> 24)) ? 24 + LogTable256[t] : 16 + LogTable256[tt & 0xFF];
- else
+ else
r = (t = (unsigned)(n >> 8)) ? 8 + LogTable256[t] : LogTable256[n];
return(r);
@@ -365,9 +365,9 @@ H5V_log2_gen(hsize_t n)
/* Lookup table for specialized log2(n) of power of two routine */
-static const unsigned MultiplyDeBruijnBitPosition[32] =
+static const unsigned MultiplyDeBruijnBitPosition[32] =
{
- 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
+ 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
};
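
Both tables above back constant-time log2 helpers: LogTable256 stores floor(log2(n)) for every byte value, and the shift cascade in H5V_log2_gen picks off the highest non-zero byte; MultiplyDeBruijnBitPosition covers the special case where the argument is already a power of two. An editor's sketch of each technique for a 32-bit value, not part of the patch; the de Bruijn constant 0x077CB531 is the classic one from the Bit Twiddling Hacks collection and is an assumption here, since the multiply itself lies outside these hunks.

/* General case: locate the highest set byte, then look its log2 up in the table. */
static unsigned
log2_u32_sketch(unsigned v)                 /* assumes 32-bit unsigned */
{
    unsigned r, t, tt;

    if((tt = v >> 16))
        r = (t = v >> 24) ? 24 + LogTable256[t] : 16 + LogTable256[tt & 0xFF];
    else
        r = (t = v >> 8) ? 8 + LogTable256[t] : LogTable256[v];

    return r;                               /* e.g. 0x00012345 -> 16 */
}

/* Power-of-two case: the top five bits of v * deBruijn identify the set bit. */
static unsigned
log2_of2_u32_sketch(unsigned v)             /* v must be a power of two */
{
    return MultiplyDeBruijnBitPosition[(v * 0x077CB531U) >> 27];
}
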
diff --git a/src/H5private.h b/src/H5private.h
index 39971eb..c216f92 100644
--- a/src/H5private.h
+++ b/src/H5private.h
@@ -803,7 +803,7 @@ H5_DLL int HDrand(void);
#ifdef H5_VMS
#define HDremove(S) HDremove_all(S)
int HDremove_all(char * fname);
-#else
+#else
#define HDremove(S) remove(S)
#endif /*H5_VMS*/
#define HDrename(OLD,NEW) rename(OLD,NEW)