author     Quincey Koziol <koziol@hdfgroup.org>    2006-06-27 14:45:06 (GMT)
committer  Quincey Koziol <koziol@hdfgroup.org>    2006-06-27 14:45:06 (GMT)
commit     7be3afb278aea67ba09a97f4b41c0aaaf5c47983 (patch)
tree       24ed86ab2a5c982fbf182d2ac8cd892c3813bc34
parent     8d72542a50fac7a747fe0bfec8d2285de8efd29f (diff)
download   hdf5-7be3afb278aea67ba09a97f4b41c0aaaf5c47983.zip
           hdf5-7be3afb278aea67ba09a97f4b41c0aaaf5c47983.tar.gz
           hdf5-7be3afb278aea67ba09a97f4b41c0aaaf5c47983.tar.bz2
[svn-r12440] Purpose:
    Code cleanup

Description:
    Trim trailing whitespace in Makefile.am and C/C++ source files to make
    diffing changes easier.

Platforms tested:
    None necessary, whitespace only change
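The change is purely mechanical. As an illustration only (the commit message does not say which tool was used), a minimal C filter along the following lines produces the kind of edit shown in the diff below: it copies its input line by line and drops trailing spaces and tabs.

    /* trim_ws.c -- illustrative sketch, not part of this commit.
     * Copy stdin to stdout, removing trailing spaces and tabs on each line.
     * Caveat: a line longer than the buffer is read in chunks and each chunk
     * is trimmed, which could drop blanks from the middle of such a line;
     * for ordinary source files this does not matter. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char buf[8192];

        while (fgets(buf, sizeof(buf), stdin) != NULL) {
            size_t len = strlen(buf);
            int had_newline = (len > 0 && buf[len - 1] == '\n');

            if (had_newline)
                buf[--len] = '\0';     /* strip newline, re-added below */
            while (len > 0 && (buf[len - 1] == ' ' || buf[len - 1] == '\t'))
                buf[--len] = '\0';     /* drop trailing blanks */
            fputs(buf, stdout);
            if (had_newline)
                fputc('\n', stdout);
        }
        return 0;
    }

Compiled with, e.g., cc -o trim_ws trim_ws.c, it would be run one file at a time (./trim_ws < src/H5AC.c > H5AC.tmp, then the temporary file moved back); the file name here is only an example.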
Diffstat
-rw-r--r--  c++/src/H5DataType.cpp  2
-rw-r--r--  c++/src/H5DcreatProp.cpp  12
-rw-r--r--  c++/src/H5IdComponent.cpp  2
-rw-r--r--  c++/src/Makefile.am  2
-rw-r--r--  c++/test/Makefile.am  6
-rw-r--r--  c++/test/tattr.cpp  2
-rw-r--r--  fortran/src/H5match_types.c  8
-rw-r--r--  fortran/src/Makefile.am  2
-rw-r--r--  fortran/test/t.c  4
-rw-r--r--  hl/c++/src/Makefile.am  2
-rw-r--r--  hl/c++/test/Makefile.am  2
-rw-r--r--  hl/examples/ex_ds1.c  4
-rw-r--r--  hl/examples/ex_image1.c  16
-rw-r--r--  hl/examples/ex_image2.c  12
-rw-r--r--  hl/examples/ex_lite1.c  10
-rw-r--r--  hl/examples/ex_lite2.c  10
-rw-r--r--  hl/examples/ex_lite3.c  12
-rw-r--r--  hl/examples/ex_table_01.c  36
-rw-r--r--  hl/examples/ex_table_02.c  28
-rw-r--r--  hl/examples/ex_table_03.c  42
-rw-r--r--  hl/examples/ex_table_04.c  58
-rw-r--r--  hl/examples/ex_table_05.c  56
-rw-r--r--  hl/examples/ex_table_06.c  22
-rw-r--r--  hl/examples/ex_table_07.c  34
-rw-r--r--  hl/examples/ex_table_08.c  42
-rw-r--r--  hl/examples/ex_table_09.c  46
-rw-r--r--  hl/examples/ex_table_10.c  44
-rw-r--r--  hl/examples/ex_table_11.c  40
-rw-r--r--  hl/examples/ex_table_12.c  36
-rw-r--r--  hl/fortran/src/Makefile.am  6
-rw-r--r--  hl/src/H5DS.c  50
-rw-r--r--  hl/src/H5LT.c  54
-rw-r--r--  hl/src/H5LTanalyze.c  20
-rw-r--r--  hl/src/H5LTparse.c  50
-rw-r--r--  hl/src/H5LTprivate.h  4
-rw-r--r--  hl/src/H5PTprivate.h  2
-rw-r--r--  hl/src/H5TB.c  34
-rw-r--r--  hl/test/test_lite.c  170
-rw-r--r--  perform/benchpar.c  6
-rw-r--r--  perform/pio_engine.c  32
-rw-r--r--  perform/pio_perf.c  8
-rw-r--r--  perform/pio_standalone.c  2
-rw-r--r--  perform/pio_timer.c  12
-rw-r--r--  src/H5.c  6
-rw-r--r--  src/H5A.c  4
-rw-r--r--  src/H5AC.c  316
-rw-r--r--  src/H5ACpkg.h  110
-rw-r--r--  src/H5ACprivate.h  4
-rw-r--r--  src/H5ACpublic.h  30
-rw-r--r--  src/H5B2private.h  2
-rw-r--r--  src/H5Bprivate.h  2
-rw-r--r--  src/H5C.c  306
-rw-r--r--  src/H5CS.c  8
-rw-r--r--  src/H5Cpkg.h  48
-rw-r--r--  src/H5Cprivate.h  38
-rw-r--r--  src/H5D.c  2
-rw-r--r--  src/H5Dcompact.c  6
-rw-r--r--  src/H5Dcontig.c  4
-rw-r--r--  src/H5Dio.c  20
-rw-r--r--  src/H5Distore.c  26
-rw-r--r--  src/H5Dmpio.c  276
-rw-r--r--  src/H5Doh.c  2
-rw-r--r--  src/H5E.c  26
-rw-r--r--  src/H5Eterm.h  266
-rw-r--r--  src/H5F.c  6
-rw-r--r--  src/H5FDfamily.c  2
-rw-r--r--  src/H5FDmpi.h  4
-rw-r--r--  src/H5FDmpio.c  36
-rw-r--r--  src/H5FDmpiposix.c  10
-rw-r--r--  src/H5FDmulti.c  2
-rw-r--r--  src/H5FS.c  2
-rw-r--r--  src/H5FSprivate.h  2
-rw-r--r--  src/H5G.c  40
-rw-r--r--  src/H5Gloc.c  2
-rw-r--r--  src/H5Gname.c  2
-rw-r--r--  src/H5Gnode.c  6
-rw-r--r--  src/H5Gobj.c  4
-rw-r--r--  src/H5Gpublic.h  2
-rw-r--r--  src/H5HFcache.c  4
-rw-r--r--  src/H5HFhdr.c  2
-rw-r--r--  src/H5HFiter.c  2
-rw-r--r--  src/H5HFprivate.h  2
-rw-r--r--  src/H5HL.c  20
-rw-r--r--  src/H5O.c  122
-rw-r--r--  src/H5Oattr.c  32
-rw-r--r--  src/H5Ocont.c  8
-rw-r--r--  src/H5Odtype.c  4
-rw-r--r--  src/H5Oefl.c  8
-rw-r--r--  src/H5Olayout.c  10
-rw-r--r--  src/H5Olinfo.c  2
-rw-r--r--  src/H5Olink.c  16
-rw-r--r--  src/H5Opkg.h  6
-rw-r--r--  src/H5Opline.c  4
-rw-r--r--  src/H5Oprivate.h  6
-rw-r--r--  src/H5Oshared.c  12
-rw-r--r--  src/H5Ostab.c  18
-rwxr-xr-x  src/H5Pocpl.c  14
-rw-r--r--  src/H5Shyper.c  12
-rw-r--r--  src/H5Stest.c  6
-rw-r--r--  src/H5T.c  34
-rw-r--r--  src/H5Tconv.c  42
-rw-r--r--  src/H5Tpkg.h  24
-rw-r--r--  src/H5Vprivate.h  10
-rw-r--r--  src/H5private.h  2
-rw-r--r--  test/Makefile.am  2
-rw-r--r--  test/cache.c  116
-rw-r--r--  test/cache_api.c  2
-rw-r--r--  test/cache_common.c  62
-rw-r--r--  test/cache_common.h  8
-rwxr-xr-x  test/cross_read.c  4
-rw-r--r--  test/dt_arith.c  48
-rw-r--r--  test/dtypes.c  22
-rw-r--r--  test/enum.c  4
-rw-r--r--  test/fheap.c  38
-rw-r--r--  test/h5test.c  6
-rw-r--r--  test/links.c  2
-rwxr-xr-x  test/objcopy.c  480
-rw-r--r--  test/stab.c  8
-rw-r--r--  test/testframe.c  4
-rw-r--r--  test/tfile.c  18
-rw-r--r--  test/tselect.c  152
-rw-r--r--  test/ttsafe_acreate.c  2
-rw-r--r--  test/ttsafe_cancel.c  4
-rw-r--r--  test/tvltypes.c  4
-rw-r--r--  testpar/t_cache.c  550
-rw-r--r--  testpar/t_chunk_alloc.c  34
-rw-r--r--  testpar/t_coll_chunk.c  110
-rw-r--r--  testpar/t_mpi.c  18
-rw-r--r--  testpar/t_posix_compliant.c  90
-rw-r--r--  testpar/t_span_tree.c  54
-rw-r--r--  testpar/testphdf5.c  8
-rw-r--r--  tools/h5diff/Makefile.am  2
-rw-r--r--  tools/h5diff/h5diff_common.c  32
-rw-r--r--  tools/h5diff/h5diff_main.c  10
-rw-r--r--  tools/h5diff/h5diffgentest.c  22
-rw-r--r--  tools/h5dump/h5dump.c  20
-rw-r--r--  tools/h5dump/h5dumpgentest.c  14
-rw-r--r--  tools/h5ls/h5ls.c  6
-rw-r--r--  tools/h5repack/h5repack.c  2
-rw-r--r--  tools/h5repack/h5repack.h  8
-rw-r--r--  tools/h5repack/h5repack_copy.c  22
-rw-r--r--  tools/h5repack/h5repack_filters.c  18
-rw-r--r--  tools/h5repack/h5repack_list.c  2
-rw-r--r--  tools/h5repack/h5repack_parse.c  4
-rw-r--r--  tools/lib/h5diff.c  264
-rw-r--r--  tools/lib/h5diff_array.c  838
-rw-r--r--  tools/lib/h5diff_attr.c  52
-rw-r--r--  tools/lib/h5diff_dset.c  20
-rw-r--r--  tools/lib/h5diff_util.c  14
-rw-r--r--  tools/lib/h5tools.c  8
-rw-r--r--  tools/misc/h5stat.c  30
151 files changed, 3162 insertions, 3162 deletions
diff --git a/c++/src/H5DataType.cpp b/c++/src/H5DataType.cpp
index af05632..609e2f9 100644
--- a/c++/src/H5DataType.cpp
+++ b/c++/src/H5DataType.cpp
@@ -198,7 +198,7 @@ void DataType::commit(CommonFG& loc, const H5std_string& name) const
// Function: DataType::committed
///\brief Determines whether a datatype is a named type or a
/// transient type.
-///\return \c true if the datatype is a named type, and \c false,
+///\return \c true if the datatype is a named type, and \c false,
/// otherwise.
///\exception H5::DataTypeIException
// Programmer Binh-Minh Ribler - 2000
diff --git a/c++/src/H5DcreatProp.cpp b/c++/src/H5DcreatProp.cpp
index 5f5bceb..bc9fb03 100644
--- a/c++/src/H5DcreatProp.cpp
+++ b/c++/src/H5DcreatProp.cpp
@@ -262,7 +262,7 @@ H5D_fill_value_t DSetCreatPropList::isFillValueDefined()
/// and the filter fails then the entire I/O operation fails.
// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DSetCreatPropList::setFilter( H5Z_filter_t filter_id, unsigned int flags,
+void DSetCreatPropList::setFilter( H5Z_filter_t filter_id, unsigned int flags,
size_t cd_nelmts, const unsigned int cd_values[] ) const
{
herr_t ret_value = H5Pset_filter( id, filter_id, flags, cd_nelmts, cd_values );
@@ -329,8 +329,8 @@ int DSetCreatPropList::getNfilters() const
///\par Description
/// Failure occurs when \a filter_number is out of range.
//--------------------------------------------------------------------------
-H5Z_filter_t DSetCreatPropList::getFilter(int filter_number,
- unsigned int &flags, size_t &cd_nelmts, unsigned int* cd_values,
+H5Z_filter_t DSetCreatPropList::getFilter(int filter_number,
+ unsigned int &flags, size_t &cd_nelmts, unsigned int* cd_values,
size_t namelen, char name[], unsigned int& filter_config) const
{
H5Z_filter_t filter_id;
@@ -360,8 +360,8 @@ H5Z_filter_t DSetCreatPropList::getFilter(int filter_number,
///\exception H5::PropListIException
// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DSetCreatPropList::getFilterById(H5Z_filter_t filter_id,
- unsigned int &flags, size_t &cd_nelmts, unsigned int* cd_values,
+void DSetCreatPropList::getFilterById(H5Z_filter_t filter_id,
+ unsigned int &flags, size_t &cd_nelmts, unsigned int* cd_values,
size_t namelen, char name[], unsigned int &filter_config) const
{
herr_t ret_value = H5Pget_filter_by_id(id, filter_id, &flags, &cd_nelmts,
@@ -394,7 +394,7 @@ void DSetCreatPropList::getFilterById(H5Z_filter_t filter_id,
/// and the filter fails then the entire I/O operation fails.
// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DSetCreatPropList::modifyFilter( H5Z_filter_t filter_id, unsigned int
+void DSetCreatPropList::modifyFilter( H5Z_filter_t filter_id, unsigned int
flags, size_t cd_nelmts, const unsigned int cd_values[] ) const
{
herr_t ret_value = H5Pmodify_filter(id, filter_id, flags, cd_nelmts, cd_values);
diff --git a/c++/src/H5IdComponent.cpp b/c++/src/H5IdComponent.cpp
index 86f34d1..03c7809 100644
--- a/c++/src/H5IdComponent.cpp
+++ b/c++/src/H5IdComponent.cpp
@@ -255,7 +255,7 @@ H5std_string IdComponent::inMemFunc(const char* func_name) const
{
#ifdef H5_VMS
H5std_string full_name = fromClass();
- full_name.append("::");
+ full_name.append("::");
full_name.append(func_name);
#else
H5std_string full_name = func_name;
diff --git a/c++/src/Makefile.am b/c++/src/Makefile.am
index 924d25c..411b4f5 100644
--- a/c++/src/Makefile.am
+++ b/c++/src/Makefile.am
@@ -15,7 +15,7 @@
## Run automake to generate a Makefile.in from this file.
#
# HDF5-C++ Makefile(.in)
-#
+#
include $(top_srcdir)/config/commence.am
diff --git a/c++/test/Makefile.am b/c++/test/Makefile.am
index 35de101..814c7eb 100644
--- a/c++/test/Makefile.am
+++ b/c++/test/Makefile.am
@@ -15,7 +15,7 @@
## Run automake to generate a Makefile.in from this file.
#
# HDF5-C++ Makefile(.in)
-#
+#
include $(top_srcdir)/config/commence.am
@@ -42,10 +42,10 @@ testhdf5_SOURCES=testhdf5.cpp tattr.cpp tfile.cpp th5s.cpp h5cpputil.cpp
# Tell conclude.am that these are C++ tests.
HDF_CXX=yes
-# Some C++ compilers/linkers (PGI?) create a directory named "ii_files" that
+# Some C++ compilers/linkers (PGI?) create a directory named "ii_files" that
# holds *.ii files, which are template entity instantiations.
# This entire directory should be cleaned.
-mostlyclean-local:
+mostlyclean-local:
@if test -d ii_files; then \
$(RM) -rf ii_files; \
fi
diff --git a/c++/test/tattr.cpp b/c++/test/tattr.cpp
index e327493..7402e30 100644
--- a/c++/test/tattr.cpp
+++ b/c++/test/tattr.cpp
@@ -1012,7 +1012,7 @@ test_attr_delete(void)
/****************************************************************
**
-** test_attr_dtype_shared(): Test code for using shared datatypes
+** test_attr_dtype_shared(): Test code for using shared datatypes
** in attributes.
**
****************************************************************/
diff --git a/fortran/src/H5match_types.c b/fortran/src/H5match_types.c
index 597f467..7d969ad 100644
--- a/fortran/src/H5match_types.c
+++ b/fortran/src/H5match_types.c
@@ -160,7 +160,7 @@ int main()
/* First, define c_int_x */
-#if defined H5_FORTRAN_HAS_INTEGER_1
+#if defined H5_FORTRAN_HAS_INTEGER_1
if(sizeof(long_long) == 1)
writeTypedef("long_long", 1);
else if(sizeof(long) == 1)
@@ -176,7 +176,7 @@ int main()
* is as close as we can get. */
#endif /*H5_FORTRAN_HAS_INTEGER_1 */
-#if defined H5_FORTRAN_HAS_INTEGER_2
+#if defined H5_FORTRAN_HAS_INTEGER_2
if(sizeof(long_long) == 2)
writeTypedef("long_long", 2);
else if(sizeof(long) == 2)
@@ -189,7 +189,7 @@ int main()
writeTypedefDefault(2);
#endif /*H5_FORTRAN_HAS_INTEGER_2 */
-#if defined H5_FORTRAN_HAS_INTEGER_4
+#if defined H5_FORTRAN_HAS_INTEGER_4
if(sizeof(long_long) == 4)
writeTypedef("long_long", 4);
else if(sizeof(long) == 4)
@@ -202,7 +202,7 @@ int main()
writeTypedefDefault(4);
#endif /*H5_FORTRAN_HAS_INTEGER_4 */
-#if defined H5_FORTRAN_HAS_INTEGER_8
+#if defined H5_FORTRAN_HAS_INTEGER_8
if(sizeof(long_long) == 8)
writeTypedef("long_long", 8);
else if(sizeof(long) == 8)
diff --git a/fortran/src/Makefile.am b/fortran/src/Makefile.am
index 2dee250..07f7b84 100644
--- a/fortran/src/Makefile.am
+++ b/fortran/src/Makefile.am
@@ -89,7 +89,7 @@ uninstall-local:
# Also install and uninstall (uninstall-local above) h5fc script
install-exec-local:
- @$(INSTALL) h5fc $(bindir)/$(H5FC_NAME)
+ @$(INSTALL) h5fc $(bindir)/$(H5FC_NAME)
# Install libhdf5_fortran.settings in lib directory
settingsdir=$(libdir)
diff --git a/fortran/test/t.c b/fortran/test/t.c
index 9eede1a..5c4286c 100644
--- a/fortran/test/t.c
+++ b/fortran/test/t.c
@@ -132,7 +132,7 @@ nh5_exit_c(int_f *status)
/*----------------------------------------------------------------------------
* Name: h5_group_revision_c
* Purpose: Checks if H5_GROUP_REVISION variable defined
- * Inputs:
+ * Inputs:
* Returns: 1 if defines, 0 otherwise
* Programmer: Elena Pourmal
* Saturday, May 13, 2006
@@ -142,7 +142,7 @@ int_f
nh5_group_revision_c()
{
int_f ret =0;
-#ifdef H5_GROUP_REVISION
+#ifdef H5_GROUP_REVISION
ret = 1;
#endif
return ret;
diff --git a/hl/c++/src/Makefile.am b/hl/c++/src/Makefile.am
index 63e95b6..32ea8e9 100644
--- a/hl/c++/src/Makefile.am
+++ b/hl/c++/src/Makefile.am
@@ -15,7 +15,7 @@
## Run automake to generate a Makefile.in from this file.
#
# HDF5-C++ Makefile(.in)
-#
+#
include $(top_srcdir)/config/commence.am
diff --git a/hl/c++/test/Makefile.am b/hl/c++/test/Makefile.am
index d438e41..95917f0 100644
--- a/hl/c++/test/Makefile.am
+++ b/hl/c++/test/Makefile.am
@@ -15,7 +15,7 @@
## Run automake to generate a Makefile.in from this file.
#
# HDF5-C++ Makefile(.in)
-#
+#
include $(top_srcdir)/config/commence.am
diff --git a/hl/examples/ex_ds1.c b/hl/examples/ex_ds1.c
index 096a452..e89564d 100644
--- a/hl/examples/ex_ds1.c
+++ b/hl/examples/ex_ds1.c
@@ -59,7 +59,7 @@ int main(void)
if (H5LTmake_dataset_int(fid,DS_2_NAME,rankds,s2_dim,s2_wbuf)<0)
goto out;
-
+
/*-------------------------------------------------------------------------
* attach the DS_1_NAME dimension scale to DSET_NAME at dimension 0
*-------------------------------------------------------------------------
@@ -100,7 +100,7 @@ int main(void)
/* close file */
H5Fclose(fid);
-
+
return 0;
out:
diff --git a/hl/examples/ex_image1.c b/hl/examples/ex_image1.c
index ec7b4c2..a68a5ca 100644
--- a/hl/examples/ex_image1.c
+++ b/hl/examples/ex_image1.c
@@ -37,7 +37,7 @@ int main( void )
252,252,84, /* yellow */
252,168,0, /* orange */
252,0,0}; /* red */
-
+
/* create an image of 9 values divided evenly by the array */
space = WIDTH*HEIGHT / PAL_ENTRIES;
for (i=0, j=0, n=0; i < WIDTH*HEIGHT; i++, j++ )
@@ -50,22 +50,22 @@ int main( void )
}
if (n>PAL_ENTRIES-1) n=0;
}
-
+
/* create a new HDF5 file using default properties. */
file_id = H5Fcreate( "ex_image1.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT );
-
+
/* make the image */
status = H5IMmake_image_8bit( file_id, "image1", WIDTH, HEIGHT, buf );
-
+
/* make a palette */
status = H5IMmake_palette( file_id, "pallete", pal_dims, pal );
-
+
/* attach the palette to the image */
status = H5IMlink_palette( file_id, "image1", "pallete" );
-
+
/* close the file. */
status = H5Fclose( file_id );
-
+
return 0;
-
+
}
diff --git a/hl/examples/ex_image2.c b/hl/examples/ex_image2.c
index 0b7876d..94b05a5 100644
--- a/hl/examples/ex_image2.c
+++ b/hl/examples/ex_image2.c
@@ -25,7 +25,7 @@
#define PAL_ENTRIES 256
static int read_data(const char* file_name, hsize_t *width, hsize_t *height );
-unsigned char *gbuf = 0; /* global buffer for image data */
+unsigned char *gbuf = 0; /* global buffer for image data */
int main( void )
{
@@ -34,20 +34,20 @@ int main( void )
hsize_t height; /* height of image */
unsigned char pal[ PAL_ENTRIES * 3 ]; /* palette array */
hsize_t pal_dims[2] = {PAL_ENTRIES,3}; /* palette dimensions */
- herr_t status, i, n;
-
+ herr_t status, i, n;
+
/* create a new HDF5 file using default properties. */
file_id = H5Fcreate( "ex_image2.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT );
/* read first data file */
if (read_data(DATA_FILE1,&width,&height)<0)
goto out;
-
+
/* make the image */
status=H5IMmake_image_8bit( file_id, IMAGE1_NAME, width, height, gbuf );
/*-------------------------------------------------------------------------
- * define a palette, blue to red tones
+ * define a palette, blue to red tones
*-------------------------------------------------------------------------
*/
for ( i=0, n=0; i<PAL_ENTRIES*3; i+=3, n++)
@@ -71,7 +71,7 @@ int main( void )
/* read second data file */
if (read_data(DATA_FILE2,&width,&height)<0)
goto out;
-
+
/* make dataset */
status=H5IMmake_image_24bit( file_id, IMAGE2_NAME, width, height, "INTERLACE_PIXEL", gbuf );
diff --git a/hl/examples/ex_lite1.c b/hl/examples/ex_lite1.c
index 35abb2c..1346036 100644
--- a/hl/examples/ex_lite1.c
+++ b/hl/examples/ex_lite1.c
@@ -21,17 +21,17 @@
int main( void )
{
- hid_t file_id;
+ hid_t file_id;
hsize_t dims[RANK]={2,3};
int data[6]={1,2,3,4,5,6};
herr_t status;
-
+
/* create a HDF5 file */
- file_id = H5Fcreate ("ex_lite1.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
-
+ file_id = H5Fcreate ("ex_lite1.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+
/* create and write an integer type dataset named "dset" */
status = H5LTmake_dataset(file_id,"/dset",RANK,dims,H5T_NATIVE_INT,data);
-
+
/* close file */
status = H5Fclose (file_id);
diff --git a/hl/examples/ex_lite2.c b/hl/examples/ex_lite2.c
index fcb1c9d..2816843 100644
--- a/hl/examples/ex_lite2.c
+++ b/hl/examples/ex_lite2.c
@@ -17,21 +17,21 @@
int main( void )
{
- hid_t file_id;
+ hid_t file_id;
int data[6];
hsize_t dims[2];
herr_t status;
hsize_t i, j, nrow, n_values;
-
+
/* open file from ex_lite1.c */
- file_id = H5Fopen ("ex_lite1.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
+ file_id = H5Fopen ("ex_lite1.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
/* read dataset */
status = H5LTread_dataset_int(file_id,"/dset",data);
/* get the dimensions of the dataset */
status = H5LTget_dataset_info(file_id,"/dset",dims,NULL,NULL);
-
+
/* print it by rows */
n_values = dims[0] * dims[1];
nrow = dims[1];
@@ -41,7 +41,7 @@ int main( void )
printf (" %d", data[i*nrow + j]);
printf ("\n");
}
-
+
/* close file */
status = H5Fclose (file_id);
diff --git a/hl/examples/ex_lite3.c b/hl/examples/ex_lite3.c
index cc6e591..069f56a 100644
--- a/hl/examples/ex_lite3.c
+++ b/hl/examples/ex_lite3.c
@@ -20,16 +20,16 @@
int main( void )
{
- hid_t file_id;
+ hid_t file_id;
hid_t dset_id;
- hid_t space_id;
+ hid_t space_id;
hsize_t dims[1] = { ATTR_SIZE };
int data[ATTR_SIZE] = {1,2,3,4,5};
- herr_t status;
+ herr_t status;
int i;
-
+
/* create a file */
- file_id = H5Fcreate ("ex_lite3.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ file_id = H5Fcreate ("ex_lite3.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
/* create a data space */
space_id = H5Screate_simple(1,dims,NULL);
@@ -45,7 +45,7 @@ int main( void )
* example of H5LTset_attribute_int
*-------------------------------------------------------------------------
*/
-
+
/* create and write the attribute "attr1" on the dataset "dset" */
status = H5LTset_attribute_int(file_id,"dset","attr1",data,ATTR_SIZE);
diff --git a/hl/examples/ex_table_01.c b/hl/examples/ex_table_01.c
index b3771f4..ba1ff57 100644
--- a/hl/examples/ex_table_01.c
+++ b/hl/examples/ex_table_01.c
@@ -19,7 +19,7 @@
/*-------------------------------------------------------------------------
* Table API example
*
- * H5TBmake_table
+ * H5TBmake_table
* H5TBread_table
*
*-------------------------------------------------------------------------
@@ -32,17 +32,17 @@
int main( void )
{
- typedef struct Particle
+ typedef struct Particle
{
char name[16];
int lati;
int longi;
float pressure;
- double temperature;
+ double temperature;
} Particle;
Particle dst_buf[NRECORDS];
-
+
/* Calculate the size and the offsets of our struct members in memory */
size_t dst_size = sizeof( Particle );
size_t dst_offset[NFIELDS] = { HOFFSET( Particle, name ),
@@ -56,10 +56,10 @@ int main( void )
sizeof( dst_buf[0].longi),
sizeof( dst_buf[0].pressure),
sizeof( dst_buf[0].temperature)};
-
-
+
+
/* Define an array of Particles */
- Particle p_data[NRECORDS] = {
+ Particle p_data[NRECORDS] = {
{"zero",0,0, 0.0f, 0.0},
{"one",10,10, 1.0f, 10.0},
{"two", 20,20, 2.0f, 20.0},
@@ -71,7 +71,7 @@ int main( void )
};
/* Define field information */
- const char *field_names[NFIELDS] =
+ const char *field_names[NFIELDS] =
{ "Name","Latitude", "Longitude", "Pressure", "Temperature" };
hid_t field_type[NFIELDS];
hid_t string_type;
@@ -79,7 +79,7 @@ int main( void )
hsize_t chunk_size = 10;
int *fill_data = NULL;
int compress = 0;
- herr_t status;
+ herr_t status;
int i;
/* Initialize field_type */
@@ -90,21 +90,21 @@ int main( void )
field_type[2] = H5T_NATIVE_INT;
field_type[3] = H5T_NATIVE_FLOAT;
field_type[4] = H5T_NATIVE_DOUBLE;
-
+
/* Create a new file using default properties. */
file_id = H5Fcreate( "ex_table_01.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT );
/*-------------------------------------------------------------------------
- * H5TBmake_table
+ * H5TBmake_table
*-------------------------------------------------------------------------
*/
- status=H5TBmake_table( "Table Title", file_id, TABLE_NAME,NFIELDS,NRECORDS,
- dst_size,field_names, dst_offset, field_type,
+ status=H5TBmake_table( "Table Title", file_id, TABLE_NAME,NFIELDS,NRECORDS,
+ dst_size,field_names, dst_offset, field_type,
chunk_size, fill_data, compress, p_data );
-
+
/*-------------------------------------------------------------------------
- * H5TBread_table
+ * H5TBread_table
*-------------------------------------------------------------------------
*/
@@ -112,7 +112,7 @@ int main( void )
/* print it by rows */
for (i=0; i<NRECORDS; i++) {
- printf ("%-5s %-5d %-5d %-5f %-5f",
+ printf ("%-5s %-5d %-5d %-5f %-5f",
dst_buf[i].name,
dst_buf[i].lati,
dst_buf[i].longi,
@@ -120,12 +120,12 @@ int main( void )
dst_buf[i].temperature);
printf ("\n");
}
-
+
/*-------------------------------------------------------------------------
* end
*-------------------------------------------------------------------------
*/
-
+
/* Close the file. */
H5Fclose( file_id );
diff --git a/hl/examples/ex_table_02.c b/hl/examples/ex_table_02.c
index acefef3..3580151 100644
--- a/hl/examples/ex_table_02.c
+++ b/hl/examples/ex_table_02.c
@@ -31,19 +31,19 @@
int main( void )
{
- typedef struct Particle
+ typedef struct Particle
{
char name[16];
int lati;
int longi;
float pressure;
- double temperature;
+ double temperature;
} Particle;
Particle dst_buf[NRECORDS+NRECORDS_ADD];
/* Define an array of Particles */
- Particle p_data[NRECORDS] = {
+ Particle p_data[NRECORDS] = {
{"zero",0,0, 0.0f, 0.0},
{"one",10,10, 1.0f, 10.0},
{"two", 20,20, 2.0f, 20.0},
@@ -67,9 +67,9 @@ int main( void )
sizeof( p_data[0].longi),
sizeof( p_data[0].pressure),
sizeof( p_data[0].temperature)};
-
+
/* Define field information */
- const char *field_names[NFIELDS] =
+ const char *field_names[NFIELDS] =
{ "Name","Latitude", "Longitude", "Pressure", "Temperature" };
hid_t field_type[NFIELDS];
hid_t string_type;
@@ -77,11 +77,11 @@ int main( void )
hsize_t chunk_size = 10;
int *fill_data = NULL;
int compress = 0;
- herr_t status;
+ herr_t status;
int i;
- /* Append particles */
- Particle particle_in[ NRECORDS_ADD ] =
+ /* Append particles */
+ Particle particle_in[ NRECORDS_ADD ] =
{{ "eight",80,80, 8.0f, 80.0},
{"nine",90,90, 9.0f, 90.0} };
@@ -93,17 +93,17 @@ int main( void )
field_type[2] = H5T_NATIVE_INT;
field_type[3] = H5T_NATIVE_FLOAT;
field_type[4] = H5T_NATIVE_DOUBLE;
-
+
/* Create a new file using default properties. */
file_id = H5Fcreate( "ex_table_02.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT );
/* make a table */
- status=H5TBmake_table( "Table Title",file_id,TABLE_NAME,NFIELDS,NRECORDS,
- dst_size, field_names, dst_offset, field_type,
+ status=H5TBmake_table( "Table Title",file_id,TABLE_NAME,NFIELDS,NRECORDS,
+ dst_size, field_names, dst_offset, field_type,
chunk_size, fill_data, compress, p_data );
/* append two records */
- status=H5TBappend_records(file_id, TABLE_NAME,NRECORDS_ADD, dst_size, dst_offset, dst_sizes,
+ status=H5TBappend_records(file_id, TABLE_NAME,NRECORDS_ADD, dst_size, dst_offset, dst_sizes,
&particle_in );
/* read the table */
@@ -111,7 +111,7 @@ int main( void )
/* print it by rows */
for (i=0; i<NRECORDS+NRECORDS_ADD; i++) {
- printf ("%-5s %-5d %-5d %-5f %-5f",
+ printf ("%-5s %-5d %-5d %-5f %-5f",
dst_buf[i].name,
dst_buf[i].lati,
dst_buf[i].longi,
@@ -119,7 +119,7 @@ int main( void )
dst_buf[i].temperature);
printf ("\n");
}
-
+
/* Close the file. */
H5Fclose( file_id );
diff --git a/hl/examples/ex_table_03.c b/hl/examples/ex_table_03.c
index 7357dd2..fe4f401 100644
--- a/hl/examples/ex_table_03.c
+++ b/hl/examples/ex_table_03.c
@@ -32,13 +32,13 @@
int main( void )
{
- typedef struct Particle
+ typedef struct Particle
{
char name[16];
int lati;
int longi;
float pressure;
- double temperature;
+ double temperature;
} Particle;
Particle dst_buf[NRECORDS];
@@ -50,19 +50,19 @@ int main( void )
HOFFSET( Particle, longi ),
HOFFSET( Particle, pressure ),
HOFFSET( Particle, temperature )};
-
+
Particle p = {"zero",0,0, 0.0f, 0.0};
size_t dst_sizes[NFIELDS] = { sizeof( p.name),
sizeof( p.lati),
sizeof( p.longi),
sizeof( p.pressure),
sizeof( p.temperature)};
-
+
/* Define field information */
- const char *field_names[NFIELDS] =
+ const char *field_names[NFIELDS] =
{ "Name","Latitude", "Longitude", "Pressure", "Temperature" };
- /* Fill value particle */
- Particle fill_data[1] =
+ /* Fill value particle */
+ Particle fill_data[1] =
{ {"no data",-1,-1, -99.0f, -99.0} };
hid_t field_type[NFIELDS];
hid_t string_type;
@@ -73,8 +73,8 @@ int main( void )
herr_t status;
int i;
- /* Define 2 new particles to write */
- Particle particle_in[NRECORDS_WRITE] =
+ /* Define 2 new particles to write */
+ Particle particle_in[NRECORDS_WRITE] =
{ {"zero",0,0, 0.0f, 0.0},
{"one",10,10, 1.0f, 10.0} };
@@ -95,21 +95,21 @@ int main( void )
file_id,
TABLE_NAME,
NFIELDS,
- NRECORDS,
- dst_size,
- field_names,
- dst_offset,
- field_type,
- chunk_size,
- fill_data,
+ NRECORDS,
+ dst_size,
+ field_names,
+ dst_offset,
+ field_type,
+ chunk_size,
+ fill_data,
0, /* no compression */
NULL ); /* no data written */
/* Overwrite 2 records starting at record 0 */
- start = 0;
- nrecords = NRECORDS_WRITE;
- status=H5TBwrite_records( file_id, TABLE_NAME, start, nrecords, dst_size, dst_offset,
+ start = 0;
+ nrecords = NRECORDS_WRITE;
+ status=H5TBwrite_records( file_id, TABLE_NAME, start, nrecords, dst_size, dst_offset,
dst_sizes, particle_in);
/* read the table */
@@ -117,7 +117,7 @@ int main( void )
/* print it by rows */
for (i=0; i<NRECORDS; i++) {
- printf ("%-5s %-5d %-5d %-5f %-5f",
+ printf ("%-5s %-5d %-5d %-5f %-5f",
dst_buf[i].name,
dst_buf[i].lati,
dst_buf[i].longi,
@@ -125,7 +125,7 @@ int main( void )
dst_buf[i].temperature);
printf ("\n");
}
-
+
/* Close the file. */
H5Fclose( file_id );
diff --git a/hl/examples/ex_table_04.c b/hl/examples/ex_table_04.c
index c9559ef..55f4ee8 100644
--- a/hl/examples/ex_table_04.c
+++ b/hl/examples/ex_table_04.c
@@ -30,24 +30,24 @@
int main( void )
{
- typedef struct Particle
+ typedef struct Particle
{
char name[16];
int lati;
int longi;
float pressure;
- double temperature;
+ double temperature;
} Particle;
/* Define a subset of Particle, with latitude and longitude fields */
- typedef struct Position
+ typedef struct Position
{
int lati;
int longi;
} Position;
/* Define a subset of Particle, with name and pressure fields */
- typedef struct NamePressure
+ typedef struct NamePressure
{
char name[16];
float pressure;
@@ -76,12 +76,12 @@ int main( void )
hid_t string_type;
hid_t file_id;
hsize_t chunk_size = 10;
- Particle fill_data[1] =
- { {"no data",-1,-1, -99.0f, -99.0} }; /* Fill value particle */
+ Particle fill_data[1] =
+ { {"no data",-1,-1, -99.0f, -99.0} }; /* Fill value particle */
hsize_t start; /* Record to start reading/writing */
hsize_t nrecords; /* Number of records to read/write */
int compress = 0;
- herr_t status;
+ herr_t status;
int i;
Particle *p_data = NULL; /* Initially no data */
float pressure_in [NRECORDS] = /* Define new values for the field "Pressure" */
@@ -89,21 +89,21 @@ int main( void )
Position position_in[NRECORDS] = { /* Define new values for "Latitude,Longitude" */
{0,0},
{10,10},
- {20,20},
- {30,30},
+ {20,20},
+ {30,30},
{40,40},
- {50,50},
- {60,60},
+ {50,50},
+ {60,60},
{70,70} };
NamePressure namepre_in[NRECORDS] = /* Define new values for "Name,Pressure" */
{ {"zero",0.0f},
- {"one", 1.0f},
- {"two", 2.0f},
- {"three", 3.0f},
+ {"one", 1.0f},
+ {"two", 2.0f},
+ {"three", 3.0f},
{"four", 4.0f},
{"five", 5.0f},
- {"six", 6.0f},
- {"seven", 7.0f},
+ {"six", 6.0f},
+ {"seven", 7.0f},
};
size_t field_sizes_pos[2]=
{
@@ -111,12 +111,12 @@ int main( void )
sizeof(position_in[0].lati)
};
size_t field_sizes_namepre[2]=
- {
+ {
sizeof(namepre_in[0].name),
sizeof(namepre_in[0].pressure)
};
size_t field_sizes_pre[1]=
- {
+ {
sizeof(namepre_in[0].pressure)
};
@@ -133,20 +133,20 @@ int main( void )
file_id = H5Fcreate( "ex_table_04.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT );
/* Make the table */
- status=H5TBmake_table( "Table Title",file_id,TABLE_NAME,NFIELDS,NRECORDS,
- dst_size,field_names, dst_offset, field_type,
+ status=H5TBmake_table( "Table Title",file_id,TABLE_NAME,NFIELDS,NRECORDS,
+ dst_size,field_names, dst_offset, field_type,
chunk_size, fill_data, compress, p_data );
/* Write the pressure field starting at record 2 */
- start = 2;
- nrecords = 3;
- status=H5TBwrite_fields_name( file_id, TABLE_NAME, "Pressure", start, nrecords,
+ start = 2;
+ nrecords = 3;
+ status=H5TBwrite_fields_name( file_id, TABLE_NAME, "Pressure", start, nrecords,
sizeof( float ), 0, field_sizes_pre, pressure_in );
/* Write the new longitude and latitude information starting at record 2 */
- start = 2;
- nrecords = 3;
- status=H5TBwrite_fields_name( file_id, TABLE_NAME, "Latitude,Longitude", start, nrecords,
+ start = 2;
+ nrecords = 3;
+ status=H5TBwrite_fields_name( file_id, TABLE_NAME, "Latitude,Longitude", start, nrecords,
sizeof( Position ), field_offset_pos, field_sizes_pos, position_in );
/* read the table */
@@ -154,7 +154,7 @@ int main( void )
/* print it by rows */
for (i=0; i<NRECORDS; i++) {
- printf ("%-5s %-5d %-5d %-5f %-5f",
+ printf ("%-5s %-5d %-5d %-5f %-5f",
dst_buf[i].name,
dst_buf[i].lati,
dst_buf[i].longi,
@@ -162,12 +162,12 @@ int main( void )
dst_buf[i].temperature);
printf ("\n");
}
-
+
/*-------------------------------------------------------------------------
* end
*-------------------------------------------------------------------------
*/
-
+
/* Close the file. */
H5Fclose( file_id );
diff --git a/hl/examples/ex_table_05.c b/hl/examples/ex_table_05.c
index 62c231d..b39ef1d 100644
--- a/hl/examples/ex_table_05.c
+++ b/hl/examples/ex_table_05.c
@@ -31,29 +31,29 @@
int main( void )
{
- typedef struct Particle
+ typedef struct Particle
{
char name[16];
int lati;
int longi;
float pressure;
- double temperature;
+ double temperature;
} Particle;
/* Define a subset of Particle, with latitude and longitude fields */
- typedef struct Position
+ typedef struct Position
{
int lati;
int longi;
} Position;
/* Define a subset of Particle, with name and pressure fields */
- typedef struct NamePressure
+ typedef struct NamePressure
{
char name[16];
float pressure;
} NamePressure;
-
+
/* Calculate the type_size and the offsets of our struct members */
Particle dst_buf[NRECORDS];
size_t dst_size = sizeof( Particle );
@@ -70,28 +70,28 @@ int main( void )
size_t field_offset_pos[2] = { HOFFSET( Position, lati ),
HOFFSET( Position, longi )};
-
+
/* Initially no data */
Particle *p_data = NULL;
/* Define field information */
- const char *field_names[NFIELDS] =
+ const char *field_names[NFIELDS] =
{ "Name","Latitude", "Longitude", "Pressure", "Temperature" };
hid_t field_type[NFIELDS];
hid_t string_type;
hid_t file_id;
hsize_t chunk_size = 10;
- Particle fill_data[1] =
- { {"no data",-1,-1, -99.0f, -99.0} }; /* Fill value particle */
+ Particle fill_data[1] =
+ { {"no data",-1,-1, -99.0f, -99.0} }; /* Fill value particle */
int compress = 0;
hsize_t nfields;
hsize_t start; /* Record to start reading/writing */
hsize_t nrecords; /* Number of records to read/write */
- herr_t status;
+ herr_t status;
int i;
/* Define new values for the field "Pressure" */
- float pressure_in [NRECORDS] =
+ float pressure_in [NRECORDS] =
{ 0.0f,1.0f,2.0f,3.0f,4.0f,5.0f,6.0f,7.0f };
int field_index_pre[1] = { 3 };
@@ -100,11 +100,11 @@ int main( void )
/* Define new values for the fields "Latitude,Longitude" */
Position position_in[NRECORDS] = { {0,0},
{10,10},
- {20,20},
- {30,30},
+ {20,20},
+ {30,30},
{40,40},
- {50,50},
- {60,60},
+ {50,50},
+ {60,60},
{70,70} };
size_t field_sizes_pos[2]=
@@ -112,9 +112,9 @@ int main( void )
sizeof(position_in[0].longi),
sizeof(position_in[0].lati)
};
-
+
size_t field_sizes_pre[1]=
- {
+ {
sizeof(float)
};
@@ -126,27 +126,27 @@ int main( void )
field_type[2] = H5T_NATIVE_INT;
field_type[3] = H5T_NATIVE_FLOAT;
field_type[4] = H5T_NATIVE_DOUBLE;
-
+
/* Create a new file using default properties. */
file_id = H5Fcreate( "ex_table_05.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT );
/* Make the table */
- status=H5TBmake_table( "Table Title", file_id, TABLE_NAME,NFIELDS,NRECORDS,
- dst_size,field_names, dst_offset, field_type,
+ status=H5TBmake_table( "Table Title", file_id, TABLE_NAME,NFIELDS,NRECORDS,
+ dst_size,field_names, dst_offset, field_type,
chunk_size, fill_data, compress, p_data );
/* Write the pressure field starting at record 2 */
nfields = 1;
- start = 2;
- nrecords = 3;
- status=H5TBwrite_fields_index( file_id, TABLE_NAME, nfields, field_index_pre, start, nrecords,
+ start = 2;
+ nrecords = 3;
+ status=H5TBwrite_fields_index( file_id, TABLE_NAME, nfields, field_index_pre, start, nrecords,
sizeof( float ), 0, field_sizes_pre, pressure_in );
/* Write the new longitude and latitude information starting at record 2 */
nfields = 2;
- start = 2;
- nrecords = 3;
- status=H5TBwrite_fields_index( file_id, TABLE_NAME, nfields, field_index_pos, start, nrecords,
+ start = 2;
+ nrecords = 3;
+ status=H5TBwrite_fields_index( file_id, TABLE_NAME, nfields, field_index_pos, start, nrecords,
sizeof( Position ), field_offset_pos, field_sizes_pos, position_in );
/* read the table */
@@ -154,7 +154,7 @@ int main( void )
/* print it by rows */
for (i=0; i<NRECORDS; i++) {
- printf ("%-5s %-5d %-5d %-5f %-5f",
+ printf ("%-5s %-5d %-5d %-5f %-5f",
dst_buf[i].name,
dst_buf[i].lati,
dst_buf[i].longi,
@@ -162,7 +162,7 @@ int main( void )
dst_buf[i].temperature);
printf ("\n");
}
-
+
/* Close the file. */
H5Fclose( file_id );
diff --git a/hl/examples/ex_table_06.c b/hl/examples/ex_table_06.c
index 377afc3..a0d6017 100644
--- a/hl/examples/ex_table_06.c
+++ b/hl/examples/ex_table_06.c
@@ -30,13 +30,13 @@
int main( void )
{
- typedef struct Particle
+ typedef struct Particle
{
char name[16];
int lati;
int longi;
float pressure;
- double temperature;
+ double temperature;
} Particle;
/* Calculate the size and the offsets of our struct members in memory */
@@ -46,20 +46,20 @@ int main( void )
HOFFSET( Particle, longi ),
HOFFSET( Particle, pressure ),
HOFFSET( Particle, temperature )};
-
+
/* Define field information */
- const char *field_names[NFIELDS] =
+ const char *field_names[NFIELDS] =
{ "Name","Latitude", "Longitude", "Pressure", "Temperature" };
hid_t field_type[NFIELDS];
hid_t string_type;
hid_t file_id;
hsize_t chunk_size = 10;
- Particle fill_data[1] =
- { {"no data",-1,-1, -99.0f, -99.0} }; /* Fill value particle */
+ Particle fill_data[1] =
+ { {"no data",-1,-1, -99.0f, -99.0} }; /* Fill value particle */
int compress = 0;
hsize_t nfields_out;
hsize_t nrecords_out;
- herr_t status;
+ herr_t status;
/* Initialize field_type */
string_type = H5Tcopy( H5T_C_S1 );
@@ -69,13 +69,13 @@ int main( void )
field_type[2] = H5T_NATIVE_INT;
field_type[3] = H5T_NATIVE_FLOAT;
field_type[4] = H5T_NATIVE_DOUBLE;
-
+
/* Create a new file using default properties. */
file_id = H5Fcreate( "ex_table_06.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT );
/* Make a table */
- status=H5TBmake_table( "Table Title",file_id,TABLE_NAME,NFIELDS,NRECORDS,dst_size,
- field_names, dst_offset, field_type,
+ status=H5TBmake_table( "Table Title",file_id,TABLE_NAME,NFIELDS,NRECORDS,dst_size,
+ field_names, dst_offset, field_type,
chunk_size, fill_data, compress, NULL);
/* Get table info */
@@ -83,7 +83,7 @@ int main( void )
/* print */
printf ("Table has %d fields and %d records\n",(int)nfields_out,(int)nrecords_out);
-
+
/* Close the file. */
H5Fclose( file_id );
diff --git a/hl/examples/ex_table_07.c b/hl/examples/ex_table_07.c
index eb365b2..2d80967 100644
--- a/hl/examples/ex_table_07.c
+++ b/hl/examples/ex_table_07.c
@@ -30,13 +30,13 @@
int main( void )
{
- typedef struct Particle
+ typedef struct Particle
{
char name[16];
int lati;
int longi;
float pressure;
- double temperature;
+ double temperature;
} Particle;
/* Calculate the size and the offsets of our struct members in memory */
@@ -46,9 +46,9 @@ int main( void )
HOFFSET( Particle, longi ),
HOFFSET( Particle, pressure ),
HOFFSET( Particle, temperature )};
-
+
/* Define an array of Particles */
- Particle p_data[NRECORDS] = {
+ Particle p_data[NRECORDS] = {
{"zero",0,0, 0.0f, 0.0},
{"one",10,10, 1.0f, 10.0},
{"two", 20,20, 2.0f, 20.0},
@@ -66,14 +66,14 @@ int main( void )
hid_t file_id;
hsize_t chunk_size = 10;
int compress = 0;
- Particle fill_data[1] =
+ Particle fill_data[1] =
{ {"no data",-1,-1, -99.0f, -99.0} };
hsize_t start; /* Record to start reading */
hsize_t nrecords; /* Number of records to insert/delete */
hsize_t nfields_out;
hsize_t nrecords_out;
- herr_t status;
-
+ herr_t status;
+
/* Initialize the field field_type */
string_type = H5Tcopy( H5T_C_S1 );
H5Tset_size( string_type, 16 );
@@ -82,18 +82,18 @@ int main( void )
field_type[2] = H5T_NATIVE_INT;
field_type[3] = H5T_NATIVE_FLOAT;
field_type[4] = H5T_NATIVE_DOUBLE;
-
+
/* Create a new file using default properties. */
file_id = H5Fcreate( "ex_table_07.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT );
-
+
/* Make the table */
- status=H5TBmake_table( "Table Title",file_id,TABLE_NAME,NFIELDS,NRECORDS,
- dst_size,field_names, dst_offset, field_type,
+ status=H5TBmake_table( "Table Title",file_id,TABLE_NAME,NFIELDS,NRECORDS,
+ dst_size,field_names, dst_offset, field_type,
chunk_size, fill_data, compress, p_data );
-
+
/* Delete records */
- start = 3;
- nrecords = 3;
+ start = 3;
+ nrecords = 3;
status=H5TBdelete_record( file_id, TABLE_NAME, start, nrecords );
/* Get table info */
@@ -101,11 +101,11 @@ int main( void )
/* print */
printf ("Table has %d fields and %d records\n",(int)nfields_out,(int)nrecords_out);
-
+
/* Close the file. */
H5Fclose( file_id );
-
+
return 0;
-
+
}
diff --git a/hl/examples/ex_table_08.c b/hl/examples/ex_table_08.c
index 8ce2881..fb6cab9 100644
--- a/hl/examples/ex_table_08.c
+++ b/hl/examples/ex_table_08.c
@@ -31,19 +31,19 @@
int main( void )
{
- typedef struct Particle
+ typedef struct Particle
{
char name[16];
int lati;
int longi;
float pressure;
- double temperature;
+ double temperature;
} Particle;
Particle dst_buf[ NRECORDS + NRECORDS_INS ];
/* Define an array of Particles */
- Particle p_data[NRECORDS] = {
+ Particle p_data[NRECORDS] = {
{"zero",0,0, 0.0f, 0.0},
{"one",10,10, 1.0f, 10.0},
{"two", 20,20, 2.0f, 20.0},
@@ -53,7 +53,7 @@ int main( void )
{"six", 60,60, 6.0f, 60.0},
{"seven",70,70, 7.0f, 70.0}
};
-
+
/* Calculate the size and the offsets of our struct members in memory */
size_t dst_size = sizeof( Particle );
size_t dst_offset[NFIELDS] = { HOFFSET( Particle, name ),
@@ -66,15 +66,15 @@ int main( void )
sizeof( p_data[0].longi),
sizeof( p_data[0].pressure),
sizeof( p_data[0].temperature)};
-
+
/* Define an array of Particles to insert */
- Particle p_data_insert[NRECORDS_INS] =
+ Particle p_data_insert[NRECORDS_INS] =
{ {"new",30,30, 3.0f, 30.0},
{"new",40,40, 4.0f, 40.0}
};
-
+
/* Define field information */
- const char *field_names[NFIELDS] =
+ const char *field_names[NFIELDS] =
{ "Name","Latitude", "Longitude", "Pressure", "Temperature" };
hid_t field_type[NFIELDS];
hid_t string_type;
@@ -88,7 +88,7 @@ int main( void )
hsize_t nfields_out;
hsize_t nrecords_out;
int i;
-
+
/* Initialize the field field_type */
string_type = H5Tcopy( H5T_C_S1 );
H5Tset_size( string_type, 16 );
@@ -97,19 +97,19 @@ int main( void )
field_type[2] = H5T_NATIVE_INT;
field_type[3] = H5T_NATIVE_FLOAT;
field_type[4] = H5T_NATIVE_DOUBLE;
-
+
/* Create a new file using default properties. */
file_id = H5Fcreate( "ex_table_08.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT );
-
+
/* Make the table */
- status=H5TBmake_table( "Table Title",file_id,TABLE_NAME,NFIELDS,NRECORDS,
- dst_size,field_names, dst_offset, field_type,
+ status=H5TBmake_table( "Table Title",file_id,TABLE_NAME,NFIELDS,NRECORDS,
+ dst_size,field_names, dst_offset, field_type,
chunk_size, fill_data, compress, p_data );
/* Insert records */
- start = 3;
- nrecords = NRECORDS_INS;
- status=H5TBinsert_record( file_id, TABLE_NAME, start, nrecords, dst_size, dst_offset,
+ start = 3;
+ nrecords = NRECORDS_INS;
+ status=H5TBinsert_record( file_id, TABLE_NAME, start, nrecords, dst_size, dst_offset,
dst_sizes, p_data_insert );
/* read the table */
@@ -120,10 +120,10 @@ int main( void )
/* print */
printf ("Table has %d fields and %d records\n",(int)nfields_out,(int)nrecords_out);
-
+
/* print it by rows */
for (i=0; i<nrecords_out; i++) {
- printf ("%-5s %-5d %-5d %-5f %-5f",
+ printf ("%-5s %-5d %-5d %-5f %-5f",
dst_buf[i].name,
dst_buf[i].lati,
dst_buf[i].longi,
@@ -131,12 +131,12 @@ int main( void )
dst_buf[i].temperature);
printf ("\n");
}
-
+
/* Close the file. */
H5Fclose( file_id );
-
+
return 0;
-
+
}
diff --git a/hl/examples/ex_table_09.c b/hl/examples/ex_table_09.c
index 21d1813..b02568c 100644
--- a/hl/examples/ex_table_09.c
+++ b/hl/examples/ex_table_09.c
@@ -32,13 +32,13 @@
int main( void )
{
- typedef struct Particle
+ typedef struct Particle
{
char name[16];
int lati;
int longi;
float pressure;
- double temperature;
+ double temperature;
} Particle;
Particle dst_buf[ NRECORDS + NRECORDS_INS ];
@@ -57,7 +57,7 @@ int main( void )
sizeof( dst_buf[0].temperature)};
/* Define an array of Particles */
- Particle p_data[NRECORDS] = {
+ Particle p_data[NRECORDS] = {
{"zero",0,0, 0.0f, 0.0},
{"one",10,10, 1.0f, 10.0},
{"two", 20,20, 2.0f, 20.0},
@@ -69,15 +69,15 @@ int main( void )
};
/* Define field information */
- const char *field_names[NFIELDS] =
+ const char *field_names[NFIELDS] =
{ "Name","Latitude", "Longitude", "Pressure", "Temperature" };
hid_t field_type[NFIELDS];
hid_t string_type;
hid_t file_id;
hsize_t chunk_size = 10;
int compress = 0;
- Particle fill_data[1] =
- { {"no data",-1,-1, -99.0f, -99.0} }; /* Fill value particle */
+ Particle fill_data[1] =
+ { {"no data",-1,-1, -99.0f, -99.0} }; /* Fill value particle */
hsize_t start1; /* Record to start reading from 1st table */
hsize_t nrecords; /* Number of records to insert */
hsize_t start2; /* Record to start writing in 2nd table */
@@ -85,7 +85,7 @@ int main( void )
int i;
hsize_t nfields_out;
hsize_t nrecords_out;
-
+
/* Initialize the field field_type */
string_type = H5Tcopy( H5T_C_S1 );
H5Tset_size( string_type, 16 );
@@ -94,24 +94,24 @@ int main( void )
field_type[2] = H5T_NATIVE_INT;
field_type[3] = H5T_NATIVE_FLOAT;
field_type[4] = H5T_NATIVE_DOUBLE;
-
+
/* Create a new file using default properties. */
file_id = H5Fcreate( "ex_table_09.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT );
-
+
/* Make 2 tables: TABLE2_NAME is empty */
- status=H5TBmake_table( "Table Title",file_id,TABLE1_NAME,NFIELDS,NRECORDS,
- dst_size,field_names, dst_offset, field_type,
+ status=H5TBmake_table( "Table Title",file_id,TABLE1_NAME,NFIELDS,NRECORDS,
+ dst_size,field_names, dst_offset, field_type,
chunk_size, fill_data, compress, p_data );
-
- status=H5TBmake_table( "Table Title",file_id,TABLE2_NAME,NFIELDS,NRECORDS,
- dst_size,field_names, dst_offset, field_type,
+
+ status=H5TBmake_table( "Table Title",file_id,TABLE2_NAME,NFIELDS,NRECORDS,
+ dst_size,field_names, dst_offset, field_type,
chunk_size, fill_data, compress, NULL );
-
-
+
+
/* Add 2 records from TABLE1_NAME to TABLE2_NAME */
- start1 = 3;
- nrecords = NRECORDS_INS;
- start2 = 6;
+ start1 = 3;
+ nrecords = NRECORDS_INS;
+ start2 = 6;
status=H5TBadd_records_from( file_id, TABLE1_NAME, start1, nrecords, TABLE2_NAME, start2 );
/* read TABLE2_NAME: it should have 2 more records now */
@@ -122,10 +122,10 @@ int main( void )
/* print */
printf ("Table has %d fields and %d records\n",(int)nfields_out,(int)nrecords_out);
-
+
/* print it by rows */
for (i=0; i<nrecords_out; i++) {
- printf ("%-5s %-5d %-5d %-5f %-5f",
+ printf ("%-5s %-5d %-5d %-5f %-5f",
dst_buf[i].name,
dst_buf[i].lati,
dst_buf[i].longi,
@@ -133,10 +133,10 @@ int main( void )
dst_buf[i].temperature);
printf ("\n");
}
-
+
/* Close the file. */
H5Fclose( file_id );
-
+
return 0;
}
diff --git a/hl/examples/ex_table_10.c b/hl/examples/ex_table_10.c
index 5483d52..5e87914 100644
--- a/hl/examples/ex_table_10.c
+++ b/hl/examples/ex_table_10.c
@@ -32,17 +32,17 @@
int main( void )
{
- typedef struct Particle
+ typedef struct Particle
{
char name[16];
int lati;
int longi;
float pressure;
- double temperature;
+ double temperature;
} Particle;
/* Define an array of Particles */
- Particle p_data[NRECORDS] = {
+ Particle p_data[NRECORDS] = {
{"zero",0,0, 0.0f, 0.0},
{"one",10,10, 1.0f, 10.0},
{"two", 20,20, 2.0f, 20.0},
@@ -66,10 +66,10 @@ int main( void )
sizeof( dst_buf[0].longi),
sizeof( dst_buf[0].pressure),
sizeof( dst_buf[0].temperature)};
-
-
+
+
/* Define field information */
- const char *field_names[NFIELDS] =
+ const char *field_names[NFIELDS] =
{ "Name","Latitude", "Longitude", "Pressure", "Temperature" };
hid_t field_type[NFIELDS];
hid_t string_type;
@@ -77,11 +77,11 @@ int main( void )
hsize_t chunk_size = 10;
int compress = 0;
int *fill_data = NULL;
- herr_t status;
+ herr_t status;
hsize_t nfields_out;
hsize_t nrecords_out;
int i;
-
+
/* Initialize the field field_type */
string_type = H5Tcopy( H5T_C_S1 );
H5Tset_size( string_type, 16 );
@@ -90,34 +90,34 @@ int main( void )
field_type[2] = H5T_NATIVE_INT;
field_type[3] = H5T_NATIVE_FLOAT;
field_type[4] = H5T_NATIVE_DOUBLE;
-
+
/* Create a new file using default properties. */
file_id = H5Fcreate( "ex_table_10.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT );
-
+
/* Make two tables */
- status=H5TBmake_table( "Table Title",file_id,TABLE1_NAME,NFIELDS,NRECORDS,
- dst_size,field_names, dst_offset, field_type,
+ status=H5TBmake_table( "Table Title",file_id,TABLE1_NAME,NFIELDS,NRECORDS,
+ dst_size,field_names, dst_offset, field_type,
chunk_size, fill_data, compress, p_data );
-
- status=H5TBmake_table( "Table Title",file_id,TABLE2_NAME,NFIELDS,NRECORDS,
- dst_size,field_names, dst_offset, field_type,
+
+ status=H5TBmake_table( "Table Title",file_id,TABLE2_NAME,NFIELDS,NRECORDS,
+ dst_size,field_names, dst_offset, field_type,
chunk_size, fill_data, compress, p_data );
-
+
/* Combine the two tables into a third in the same file */
status=H5TBcombine_tables( file_id, TABLE1_NAME, file_id, TABLE2_NAME, TABLE3_NAME );
/* read the combined table */
status=H5TBread_table( file_id, TABLE3_NAME, dst_size, dst_offset, dst_sizes, dst_buf );
-
+
/* Get table info */
status=H5TBget_table_info (file_id,TABLE3_NAME, &nfields_out, &nrecords_out );
-
+
/* print */
printf ("Table has %d fields and %d records\n",(int)nfields_out,(int)nrecords_out);
-
+
/* print it by rows */
for (i=0; i<nrecords_out; i++) {
- printf ("%-5s %-5d %-5d %-5f %-5f",
+ printf ("%-5s %-5d %-5d %-5f %-5f",
dst_buf[i].name,
dst_buf[i].lati,
dst_buf[i].longi,
@@ -125,10 +125,10 @@ int main( void )
dst_buf[i].temperature);
printf ("\n");
}
-
+
/* Close the file */
H5Fclose( file_id );
-
+
return 0;
}
diff --git a/hl/examples/ex_table_11.c b/hl/examples/ex_table_11.c
index d082f65..52aa922 100644
--- a/hl/examples/ex_table_11.c
+++ b/hl/examples/ex_table_11.c
@@ -30,17 +30,17 @@
int main( void )
{
- typedef struct Particle1
+ typedef struct Particle1
{
char name[16];
int lati;
int longi;
float pressure;
- double temperature;
+ double temperature;
} Particle1;
-
+
/* Define an array of Particles */
- Particle1 p_data[NRECORDS] = {
+ Particle1 p_data[NRECORDS] = {
{"zero",0,0, 0.0f, 0.0},
{"one",10,10, 1.0f, 10.0},
{"two", 20,20, 2.0f, 20.0},
@@ -50,7 +50,7 @@ int main( void )
{"six", 60,60, 6.0f, 60.0},
{"seven",70,70, 7.0f, 70.0}
};
-
+
/* Calculate the size and the offsets of our struct members in memory */
size_t dst_size1 = sizeof( Particle1 );
size_t dst_offset1[NFIELDS] = { HOFFSET( Particle1, name ),
@@ -58,9 +58,9 @@ int main( void )
HOFFSET( Particle1, longi ),
HOFFSET( Particle1, pressure ),
HOFFSET( Particle1, temperature )};
-
+
/* Define field information */
- const char *field_names[NFIELDS] =
+ const char *field_names[NFIELDS] =
{ "Name","Latitude", "Longitude", "Pressure", "Temperature" };
hid_t field_type[NFIELDS];
hid_t string_type;
@@ -70,14 +70,14 @@ int main( void )
Particle1 fill_data[1] = { "no data",-1,-1, -99.0f, -99.0 };
int fill_data_new[1] = { -100 };
hsize_t position;
- herr_t status;
+ herr_t status;
hsize_t nfields_out;
hsize_t nrecords_out;
-
+
/* Define the inserted field information */
hid_t field_type_new = H5T_NATIVE_INT;
int data[NRECORDS] = { 0,1,2,3,4,5,6,7 };
-
+
/* Initialize the field type */
string_type = H5Tcopy( H5T_C_S1 );
H5Tset_size( string_type, 16 );
@@ -86,18 +86,18 @@ int main( void )
field_type[2] = H5T_NATIVE_INT;
field_type[3] = H5T_NATIVE_FLOAT;
field_type[4] = H5T_NATIVE_DOUBLE;
-
+
/* Create a new file using default properties. */
file_id = H5Fcreate( "ex_table_11.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT );
-
+
/* Make the table */
- status=H5TBmake_table( "Table Title",file_id,TABLE_NAME,NFIELDS,NRECORDS,
- dst_size1,field_names, dst_offset1, field_type,
+ status=H5TBmake_table( "Table Title",file_id,TABLE_NAME,NFIELDS,NRECORDS,
+ dst_size1,field_names, dst_offset1, field_type,
chunk_size, fill_data, compress, p_data );
-
+
/* Insert the new field at the end of the field list */
position = NFIELDS;
- status=H5TBinsert_field( file_id, TABLE_NAME, "New Field", field_type_new, position,
+ status=H5TBinsert_field( file_id, TABLE_NAME, "New Field", field_type_new, position,
fill_data_new, data );
/* Get table info */
@@ -105,12 +105,12 @@ int main( void )
/* print */
printf ("Table has %d fields and %d records\n",(int)nfields_out,(int)nrecords_out);
-
+
/* Close the file. */
H5Fclose( file_id );
-
+
return 0;
-
-
+
+
}
diff --git a/hl/examples/ex_table_12.c b/hl/examples/ex_table_12.c
index 9ec3054..26e238f 100644
--- a/hl/examples/ex_table_12.c
+++ b/hl/examples/ex_table_12.c
@@ -30,15 +30,15 @@
int main( void )
{
- typedef struct Particle
+ typedef struct Particle
{
char name[16];
int lati;
int longi;
float pressure;
- double temperature;
+ double temperature;
} Particle;
-
+
/* Calculate the size and the offsets of our struct members in memory */
size_t dst_size = sizeof( Particle );
size_t dst_offset[NFIELDS] = { HOFFSET( Particle, name ),
@@ -46,9 +46,9 @@ int main( void )
HOFFSET( Particle, longi ),
HOFFSET( Particle, pressure ),
HOFFSET( Particle, temperature )};
-
+
/* Define an array of Particles */
- Particle p_data[NRECORDS] = {
+ Particle p_data[NRECORDS] = {
{"zero",0,0, 0.0f, 0.0},
{"one",10,10, 1.0f, 10.0},
{"two", 20,20, 2.0f, 20.0},
@@ -58,21 +58,21 @@ int main( void )
{"six", 60,60, 6.0f, 60.0},
{"seven",70,70, 7.0f, 70.0}
};
-
+
/* Define field information */
- const char *field_names[NFIELDS] =
+ const char *field_names[NFIELDS] =
{ "Name","Latitude", "Longitude", "Pressure", "Temperature" };
hid_t field_type[NFIELDS];
hid_t string_type;
hid_t file_id;
hsize_t chunk_size = 10;
int compress = 0;
- Particle fill_data[1] =
+ Particle fill_data[1] =
{ {"no data",-1,-1, -99.0f, -99.0} };
- herr_t status;
+ herr_t status;
hsize_t nfields_out;
hsize_t nrecords_out;
-
+
/* Initialize the field type */
string_type = H5Tcopy( H5T_C_S1 );
H5Tset_size( string_type, 16 );
@@ -81,15 +81,15 @@ int main( void )
field_type[2] = H5T_NATIVE_INT;
field_type[3] = H5T_NATIVE_FLOAT;
field_type[4] = H5T_NATIVE_DOUBLE;
-
+
/* Create a new file using default properties. */
file_id = H5Fcreate( "ex_table_12.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT );
-
+
/* Make a table */
- status=H5TBmake_table( "Table Title", file_id, TABLE_NAME, NFIELDS, NRECORDS, dst_size,
- field_names, dst_offset, field_type,
+ status=H5TBmake_table( "Table Title", file_id, TABLE_NAME, NFIELDS, NRECORDS, dst_size,
+ field_names, dst_offset, field_type,
chunk_size, fill_data, compress, p_data );
-
+
/* Delete the field */
status=H5TBdelete_field( file_id, TABLE_NAME, "Pressure" );
@@ -98,11 +98,11 @@ int main( void )
/* print */
printf ("Table has %d fields and %d records\n",(int)nfields_out,(int)nrecords_out);
-
+
/* Close the file. */
H5Fclose( file_id );
-
+
return 0;
-
+
}
diff --git a/hl/fortran/src/Makefile.am b/hl/fortran/src/Makefile.am
index 6ee69cf..c341031 100644
--- a/hl/fortran/src/Makefile.am
+++ b/hl/fortran/src/Makefile.am
@@ -41,7 +41,7 @@ endif
#endif
libhdf5hl_fortran_la_SOURCES=H5LTfc.c H5IMfc.c H5IMcc.c H5TBfc.c H5LTff.f90 \
- H5IMff.f90 H5TBff.f90
+ H5IMff.f90 H5TBff.f90
# Fortran module files can have different extensions and different names
# (e.g., different capitalizations) on different platforms. Write rules
@@ -70,8 +70,8 @@ uninstall-local:
# determining this automagically (like we do with the C files). So, when
# doing a parallel make, some modules could be made way before the
# modules they depend upon are actually made. *sigh*
-H5LTff.lo: $(srcdir)/H5LTff.f90
+H5LTff.lo: $(srcdir)/H5LTff.f90
H5IMff.lo: $(srcdir)/H5IMff.f90
-H5TBff.lo: $(srcdir)/H5TBff.f90
+H5TBff.lo: $(srcdir)/H5TBff.f90
include $(top_srcdir)/config/conclude.am
diff --git a/hl/src/H5DS.c b/hl/src/H5DS.c
index 254f7df..fa158a5 100644
--- a/hl/src/H5DS.c
+++ b/hl/src/H5DS.c
@@ -25,11 +25,11 @@
/*-------------------------------------------------------------------------
* Function: H5DSset_scale
*
- * Purpose: The dataset DSID is converted to a Dimension Scale dataset.
- * Creates the CLASS attribute, set to the value "DIMENSION_SCALE"
- * and an empty REFERENCE_LIST attribute.
- * If DIMNAME is specified, then an attribute called NAME is created,
- * with the value DIMNAME.
+ * Purpose: The dataset DSID is converted to a Dimension Scale dataset.
+ * Creates the CLASS attribute, set to the value "DIMENSION_SCALE"
+ * and an empty REFERENCE_LIST attribute.
+ * If DIMNAME is specified, then an attribute called NAME is created,
+ * with the value DIMNAME.
*
* Return: Success: SUCCEED, Failure: FAIL
*
@@ -95,17 +95,17 @@ herr_t H5DSset_scale(hid_t dsid,
/*-------------------------------------------------------------------------
* Function: H5DSattach_scale
*
- * Purpose: Define Dimension Scale DSID to be associated with dimension IDX
- * of Dataset DID. Entries are created in the DIMENSION_LIST and
- * REFERENCE_LIST attributes.
+ * Purpose: Define Dimension Scale DSID to be associated with dimension IDX
+ * of Dataset DID. Entries are created in the DIMENSION_LIST and
+ * REFERENCE_LIST attributes.
*
* Return:
* Success: SUCCEED
* Failure: FAIL
*
- * Fails if: Bad arguments
- * If DSID is not a Dimension Scale
- * If DID is a Dimension Scale (A Dimension Scale cannot have scales)
+ * Fails if: Bad arguments
+ * If DSID is not a Dimension Scale
+ * If DID is a Dimension Scale (A Dimension Scale cannot have scales)
*
* Programmer: pvn@ncsa.uiuc.edu
*
@@ -556,20 +556,20 @@ out:
/*-------------------------------------------------------------------------
* Function: H5DSdetach_scale
*
- * Purpose: If possible, deletes association of Dimension Scale DSID with
- * dimension IDX of Dataset DID. This deletes the entries in the
- * DIMENSION_LIST and REFERENCE_LIST attributes.
+ * Purpose: If possible, deletes association of Dimension Scale DSID with
+ * dimension IDX of Dataset DID. This deletes the entries in the
+ * DIMENSION_LIST and REFERENCE_LIST attributes.
*
* Return:
* Success: SUCCEED
* Failure: FAIL
*
- * Fails if: Bad arguments
- * The dataset DID or DSID do not exist.
- * The DSID is not a Dimension Scale
- * DSID is not attached to DID.
- * Note that a scale may be associated with more than dimension of the same dataset.
- * If so, the detach operation only deletes one of the associations, for DID.
+ * Fails if: Bad arguments
+ * The dataset DID or DSID do not exist.
+ * The DSID is not a Dimension Scale
+ * DSID is not attached to DID.
+ * Note that a scale may be associated with more than dimension of the same dataset.
+ * If so, the detach operation only deletes one of the associations, for DID.
*
* Programmer: pvn@ncsa.uiuc.edu
*
@@ -934,9 +934,9 @@ out:
* 0: one of them or both do not match
* FAIL (-1): error
*
- * Fails if: Bad arguments
- * If DSID is not a Dimension Scale
- * If DID is a Dimension Scale (A Dimension Scale cannot have scales)
+ * Fails if: Bad arguments
+ * If DSID is not a Dimension Scale
+ * If DID is a Dimension Scale (A Dimension Scale cannot have scales)
*
* Programmer: pvn@ncsa.uiuc.edu
*
@@ -1209,7 +1209,7 @@ out:
*
* hid_t DID; IN: the dataset
* unsigned int dim; IN: the dimension of the dataset
- * int *idx; IN/OUT: input the index to start iterating, output the
+ * int *idx; IN/OUT: input the index to start iterating, output the
* next index to visit. If NULL, start at the first position.
* H5DS_iterate_t visitor; IN: the visitor function
* void *visitor_data; IN: arbitrary data to pass to the visitor function.
@@ -1964,7 +1964,7 @@ int H5DSget_num_scales(hid_t did,
*-------------------------------------------------------------------------
*/
- else
+ else
{
if ((aid = H5Aopen_name(did,DIMENSION_LIST))<0)
goto out;
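
The hunks above only retouch the comment blocks for H5DSset_scale, H5DSattach_scale and H5DSdetach_scale; for context, here is a minimal usage sketch of those three calls. The file name, dataset names, extents and the 1.6-era H5Dcreate signature are illustrative assumptions, and error checking is omitted for brevity.

#include "hdf5.h"
#include "hdf5_hl.h"

int main(void)
{
    hsize_t data_dims[2]  = {4, 6};
    hsize_t scale_dims[1] = {6};
    hid_t   fid, data_sid, scale_sid, did, dsid;

    fid       = H5Fcreate("scales_sketch.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    data_sid  = H5Screate_simple(2, data_dims, NULL);
    scale_sid = H5Screate_simple(1, scale_dims, NULL);

    /* 1.6-era H5Dcreate signature, matching the tree this patch applies to */
    did  = H5Dcreate(fid, "data",      H5T_NATIVE_INT,   data_sid,  H5P_DEFAULT);
    dsid = H5Dcreate(fid, "col_scale", H5T_NATIVE_FLOAT, scale_sid, H5P_DEFAULT);

    /* Turn "col_scale" into a Dimension Scale: creates the CLASS and empty
     * REFERENCE_LIST attributes, plus NAME because a dimname is supplied. */
    H5DSset_scale(dsid, "columns");

    /* Attach it to dimension 1 of "data" (updates DIMENSION_LIST and
     * REFERENCE_LIST), then detach it, removing only that one association. */
    H5DSattach_scale(did, dsid, 1);
    H5DSdetach_scale(did, dsid, 1);

    H5Dclose(dsid);
    H5Dclose(did);
    H5Sclose(scale_sid);
    H5Sclose(data_sid);
    H5Fclose(fid);
    return 0;
}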
diff --git a/hl/src/H5LT.c b/hl/src/H5LT.c
index acc30b8..5db6cd4 100644
--- a/hl/src/H5LT.c
+++ b/hl/src/H5LT.c
@@ -985,11 +985,11 @@ herr_t H5LTget_dataset_info( hid_t loc_id,
/* get the dataspace handle */
if ( (sid = H5Dget_space( did )) < 0 )
goto out;
-
+
/* get dimensions */
if ( H5Sget_simple_extent_dims( sid, dims, NULL) < 0 )
goto out;
-
+
/* terminate access to the dataspace */
if ( H5Sclose( sid ) < 0 )
goto out;
@@ -1756,7 +1756,7 @@ static herr_t find_attr( hid_t loc_id, const char *name, void *op_data)
/*-------------------------------------------------------------------------
* Function: H5LTfind_attribute
*
- * Purpose: Inquires if an attribute named attr_name exists attached to
+ * Purpose: Inquires if an attribute named attr_name exists attached to
* the object loc_id.
*
* Programmer: Pedro Vicente, pvn@ncsa.uiuc.edu
@@ -2039,7 +2039,7 @@ out:
hid_t H5LTtext_to_dtype(const char *text, H5LT_lang_t lang_type)
{
extern int yyparse(void);
- hid_t type_id;
+ hid_t type_id;
if(lang_type <= H5LT_LANG_ERR || lang_type >= H5LT_NO_LANG)
goto out;
@@ -2048,7 +2048,7 @@ hid_t H5LTtext_to_dtype(const char *text, H5LT_lang_t lang_type)
fprintf(stderr, "only DDL is supported for now.\n");
goto out;
}
-
+
input_len = strlen(text);
myinput = strdup(text);
@@ -2118,17 +2118,17 @@ print_enum(hid_t type, char* str, int indt)
size_t dst_size; /*destination value type size */
int i;
herr_t ret = SUCCEED;
-
+
if((nmembs = H5Tget_nmembers(type))==0)
goto out;
assert(nmembs>0);
if((super = H5Tget_super(type))<0)
goto out;
-
+
/* Use buffer of INT or UNSIGNED INT to print enum values because
* we don't expect these values to be so big that INT or UNSIGNED
* INT can't hold.
- */
+ */
if (H5T_SGN_NONE == H5Tget_sign(super)) {
native = H5T_NATIVE_UINT;
} else {
@@ -2137,7 +2137,7 @@ print_enum(hid_t type, char* str, int indt)
super_size = H5Tget_size(super);
dst_size = H5Tget_size(native);
-
+
/* Get the names and raw values of all members */
name = (char**)calloc((size_t)nmembs, sizeof(char *));
value = (unsigned char*)calloc((size_t)nmembs, MAX(dst_size, super_size));
@@ -2154,7 +2154,7 @@ print_enum(hid_t type, char* str, int indt)
if(H5Tconvert(super, native, (size_t)nmembs, value, NULL, H5P_DEFAULT)<0)
goto out;
}
-
+
/*
* Sort members by increasing value
* ***not implemented yet***
@@ -2167,7 +2167,7 @@ print_enum(hid_t type, char* str, int indt)
strcat(str, tmp_str);
sprintf(tmp_str, "%*s ", MAX(0, 16 - nchars), "");
strcat(str, tmp_str);
-
+
if (H5T_SGN_NONE == H5Tget_sign(native)) {
/*On SGI Altix(cobalt), wrong values were printed out with "value+i*dst_size"
*strangely, unless use another pointer "copy".*/
@@ -2226,7 +2226,7 @@ herr_t H5LTdtype_to_text(hid_t dtype, char *str, H5LT_lang_t lang_type, size_t *
size_t str_len = INCREMENT;
char *text_str;
herr_t ret = -1;
-
+
if(lang_type <= H5LT_LANG_ERR || lang_type >= H5LT_NO_LANG)
goto out;
@@ -2265,7 +2265,7 @@ out:
*
*-------------------------------------------------------------------------
*/
-herr_t H5LT_dtype_to_text(hid_t dtype, char **dt_str, H5LT_lang_t lang, size_t *slen,
+herr_t H5LT_dtype_to_text(hid_t dtype, char **dt_str, H5LT_lang_t lang, size_t *slen,
hbool_t no_user_buf)
{
H5T_class_t tcls;
@@ -2282,15 +2282,15 @@ herr_t H5LT_dtype_to_text(hid_t dtype, char **dt_str, H5LT_lang_t lang, size_t *
*dt_str = tmp;
}
}
-
+
if(lang != H5LT_DDL) {
sprintf(*dt_str, "only DDL is supported for now");
goto out;
}
-
+
if((tcls = H5Tget_class(dtype))<0)
goto out;
-
+
switch (tcls) {
case H5T_INTEGER:
if (H5Tequal(dtype, H5T_STD_I8BE)) {
@@ -2451,7 +2451,7 @@ herr_t H5LT_dtype_to_text(hid_t dtype, char **dt_str, H5LT_lang_t lang, size_t *
if (H5Tequal(tmp_type, str_type)) {
strcat(*dt_str, "CTYPE H5T_C_S1;\n");
goto next;
- }
+ }
/* Change the endianness and see if they're equal. */
if((order = H5Tget_order(tmp_type))<0)
@@ -2463,7 +2463,7 @@ herr_t H5LT_dtype_to_text(hid_t dtype, char **dt_str, H5LT_lang_t lang, size_t *
if(H5Tset_order(str_type, H5T_ORDER_BE)<0)
goto out;
}
-
+
if (H5Tequal(tmp_type, str_type)) {
strcat(*dt_str, "H5T_C_S1;\n");
goto next;
@@ -2481,7 +2481,7 @@ herr_t H5LT_dtype_to_text(hid_t dtype, char **dt_str, H5LT_lang_t lang, size_t *
goto out;
if(H5Tset_strpad(str_type, str_pad)<0)
goto out;
-
+
/* Are the two types equal? */
if (H5Tequal(tmp_type, str_type)) {
strcat(*dt_str, "CTYPE H5T_FORTRAN_S1;\n");
@@ -2523,7 +2523,7 @@ next:
/* Print lead-in */
sprintf(*dt_str, "H5T_OPAQUE {\n");
indent += COL;
-
+
indentation(indent + COL, *dt_str);
sprintf(tmp_str, "OPQ_SIZE %lu;\n", (unsigned long)H5Tget_size(dtype));
strcat(*dt_str, tmp_str);
@@ -2531,7 +2531,7 @@ next:
indentation(indent + COL, *dt_str);
sprintf(tmp_str, "OPQ_TAG \"%s\";\n", H5Tget_tag(dtype));
strcat(*dt_str, tmp_str);
-
+
/* Print closing */
indent -= COL;
indentation(indent + COL, *dt_str);
@@ -2548,14 +2548,14 @@ next:
sprintf(*dt_str, "H5T_ENUM {\n");
indent += COL;
indentation(indent + COL, *dt_str);
-
+
if((super = H5Tget_super(dtype))<0)
goto out;
if(H5LTdtype_to_text(super, NULL, lang, &super_len)<0)
goto out;
stmp = (char*)calloc(super_len, sizeof(char));
if(H5LTdtype_to_text(super, stmp, lang, &super_len)<0)
- goto out;
+ goto out;
strcat(*dt_str, stmp);
free(stmp);
strcat(*dt_str, ";\n");
@@ -2588,7 +2588,7 @@ next:
goto out;
stmp = (char*)calloc(super_len, sizeof(char));
if(H5LTdtype_to_text(super, stmp, lang, &super_len)<0)
- goto out;
+ goto out;
strcat(*dt_str, stmp);
free(stmp);
strcat(*dt_str, "\n");
@@ -2633,7 +2633,7 @@ next:
goto out;
stmp = (char*)calloc(super_len, sizeof(char));
if(H5LTdtype_to_text(super, stmp, lang, &super_len)<0)
- goto out;
+ goto out;
strcat(*dt_str, stmp);
free(stmp);
strcat(*dt_str, "\n");
@@ -2655,7 +2655,7 @@ next:
size_t mlen;
char* mtmp;
int nmembs;
-
+
if((nmembs = H5Tget_nmembers(dtype))<0)
goto out;
@@ -2679,7 +2679,7 @@ next:
goto out;
mtmp = (char*)calloc(mlen, sizeof(char));
if(H5LTdtype_to_text(mtype, mtmp, lang, &mlen)<0)
- goto out;
+ goto out;
strcat(*dt_str, mtmp);
free(mtmp);
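
The H5LT.c comments above describe H5LTdtype_to_text's two-call pattern (first call with a NULL buffer to learn the required length, second call to fill it), which the hl test suite later in this patch exercises repeatedly. A minimal sketch of that pattern; the choice of H5T_STD_I32BE and the expected output string are assumptions for illustration.

#include <stdio.h>
#include <stdlib.h>
#include "hdf5.h"
#include "hdf5_hl.h"

int main(void)
{
    size_t len   = 0;
    char  *text  = NULL;
    hid_t  dtype = H5Tcopy(H5T_STD_I32BE);

    /* First call: NULL buffer, only the required length is returned in len */
    if (H5LTdtype_to_text(dtype, NULL, H5LT_DDL, &len) < 0)
        return 1;
    text = (char *)calloc(len, sizeof(char));

    /* Second call: same arguments plus the buffer to fill */
    if (H5LTdtype_to_text(dtype, text, H5LT_DDL, &len) < 0)
        return 1;
    printf("DDL: %s\n", text);   /* should print H5T_STD_I32BE */

    free(text);
    H5Tclose(dtype);
    return 0;
}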
diff --git a/hl/src/H5LTanalyze.c b/hl/src/H5LTanalyze.c
index 2b31f11..2b2603c 100644
--- a/hl/src/H5LTanalyze.c
+++ b/hl/src/H5LTanalyze.c
@@ -829,7 +829,7 @@ hbool_t first_quote = 1;
/* For Lex and Yacc */
/*int input_len;
char *myinput;*/
-
+
#define TAG_STRING 1
#line 815 "H5LTanalyze.c"
@@ -1268,17 +1268,17 @@ YY_RULE_SETUP
case 40:
YY_RULE_SETUP
#line 120 "H5LTanalyze.l"
-{return token(H5T_STR_NULLTERM_TOKEN);}
+{return token(H5T_STR_NULLTERM_TOKEN);}
YY_BREAK
case 41:
YY_RULE_SETUP
#line 121 "H5LTanalyze.l"
-{return token(H5T_STR_NULLPAD_TOKEN);}
+{return token(H5T_STR_NULLPAD_TOKEN);}
YY_BREAK
case 42:
YY_RULE_SETUP
#line 122 "H5LTanalyze.l"
-{return token(H5T_STR_SPACEPAD_TOKEN);}
+{return token(H5T_STR_SPACEPAD_TOKEN);}
YY_BREAK
case 43:
YY_RULE_SETUP
@@ -1343,12 +1343,12 @@ YY_RULE_SETUP
case 55:
YY_RULE_SETUP
#line 138 "H5LTanalyze.l"
-{
- if( is_str_size || (is_enum && is_enum_memb) ||
+{
+ if( is_str_size || (is_enum && is_enum_memb) ||
is_opq_size || (asindex>-1 && arr_stack[asindex].is_dim) ||
(csindex>-1 && cmpd_stack[csindex].is_field) ) {
yylval.ival = atoi(yytext);
- return NUMBER;
+ return NUMBER;
} else
REJECT;
}
@@ -1358,7 +1358,7 @@ YY_RULE_SETUP
#line 148 "H5LTanalyze.l"
{
/*if it's first quote, and is a compound field name or an enum symbol*/
- if((is_opq_tag || is_enum || (csindex>-1 && cmpd_stack[csindex].is_field))
+ if((is_opq_tag || is_enum || (csindex>-1 && cmpd_stack[csindex].is_field))
&& first_quote) {
first_quote = 0;
BEGIN TAG_STRING;
@@ -2302,8 +2302,8 @@ int main()
int my_yyinput(char *buf, int max_size)
{
int ret;
-
- memcpy(buf, myinput, input_len);
+
+ memcpy(buf, myinput, input_len);
ret = input_len;
return ret;
}
diff --git a/hl/src/H5LTparse.c b/hl/src/H5LTparse.c
index 4867d5a..32258d0 100644
--- a/hl/src/H5LTparse.c
+++ b/hl/src/H5LTparse.c
@@ -17,7 +17,7 @@
*/
#ifndef lint
-static char const
+static char const
yyrcsid[] = "$FreeBSD: src/usr.bin/yacc/skeleton.c,v 1.28 2000/01/17 02:04:06 bde Exp $";
#endif
#include <stdlib.h>
@@ -65,7 +65,7 @@ struct arr_info {
};
/*stack for nested array type*/
struct arr_info arr_stack[STACK_SIZE];
-int asindex = -1; /*pointer to the top of array stack*/
+int asindex = -1; /*pointer to the top of array stack*/
hbool_t is_str_size = 0; /*flag to lexer for string size*/
hbool_t is_str_pad = 0; /*flag to lexer for string padding*/
@@ -73,7 +73,7 @@ H5T_pad_t str_pad; /*variable for string padding*/
H5T_cset_t str_cset; /*variable for string character set*/
hbool_t is_variable = 0; /*variable for variable-length string*/
size_t str_size; /*variable for string size*/
-
+
hid_t enum_id; /*type ID*/
hbool_t is_enum = 0; /*flag to lexer for enum type*/
hbool_t is_enum_memb = 0; /*flag to lexer for enum member*/
@@ -796,9 +796,9 @@ case 46:
break;
case 47:
#line 155 "H5LTparse.y"
-{ yyval.ival = cmpd_stack[csindex].id;
+{ yyval.ival = cmpd_stack[csindex].id;
cmpd_stack[csindex].id = 0;
- cmpd_stack[csindex].first_memb = 1;
+ cmpd_stack[csindex].first_memb = 1;
csindex--;
}
break;
@@ -808,7 +808,7 @@ case 50:
break;
case 51:
#line 166 "H5LTparse.y"
-{
+{
size_t origin_size, new_size;
hid_t dtype_id = cmpd_stack[csindex].id;
@@ -822,7 +822,7 @@ case 51:
cmpd_stack[csindex].first_memb = 0;
} else {
origin_size = H5Tget_size(dtype_id);
-
+
if(yyvsp[-1].ival == 0) {
new_size = origin_size + H5Tget_size(yyvsp[-6].ival);
H5Tset_size(dtype_id, new_size);
@@ -833,10 +833,10 @@ case 51:
H5Tinsert(dtype_id, yyvsp[-3].sval, yyvsp[-1].ival, yyvsp[-6].ival);
}
}
-
+
cmpd_stack[csindex].is_field = 0;
H5Tclose(yyvsp[-6].ival);
-
+
new_size = H5Tget_size(dtype_id);
}
break;
@@ -860,7 +860,7 @@ case 56:
break;
case 57:
#line 212 "H5LTparse.y"
-{
+{
yyval.ival = H5Tarray_create(yyvsp[-1].ival, arr_stack[asindex].ndims, arr_stack[asindex].dims, NULL);
arr_stack[asindex].ndims = 0;
asindex--;
@@ -874,9 +874,9 @@ break;
case 61:
#line 223 "H5LTparse.y"
{ int ndims = arr_stack[asindex].ndims;
- arr_stack[asindex].dims[ndims] = (hsize_t)yylval.ival;
+ arr_stack[asindex].dims[ndims] = (hsize_t)yylval.ival;
arr_stack[asindex].ndims++;
- arr_stack[asindex].is_dim = 0;
+ arr_stack[asindex].is_dim = 0;
}
break;
case 64:
@@ -889,10 +889,10 @@ case 65:
break;
case 66:
#line 240 "H5LTparse.y"
-{
+{
size_t size = (size_t)yylval.ival;
yyval.ival = H5Tcreate(H5T_OPAQUE, size);
- is_opq_size = 0;
+ is_opq_size = 0;
}
break;
case 67:
@@ -901,7 +901,7 @@ case 67:
break;
case 68:
#line 246 "H5LTparse.y"
-{
+{
H5Tset_tag(yyvsp[-6].ival, yylval.sval);
is_opq_tag = 0;
}
@@ -916,12 +916,12 @@ case 72:
break;
case 73:
#line 259 "H5LTparse.y"
-{
+{
if(yyvsp[-1].ival == H5T_VARIABLE_TOKEN)
is_variable = 1;
- else
+ else
str_size = yylval.ival;
- is_str_size = 0;
+ is_str_size = 0;
}
break;
case 74:
@@ -937,7 +937,7 @@ case 74:
break;
case 75:
#line 276 "H5LTparse.y"
-{
+{
if(yyvsp[-1].ival == H5T_CSET_ASCII_TOKEN)
str_cset = H5T_CSET_ASCII;
else if(yyvsp[-1].ival == H5T_CSET_UTF8_TOKEN)
@@ -955,7 +955,7 @@ case 76:
break;
case 77:
#line 290 "H5LTparse.y"
-{
+{
hid_t str_id = yyvsp[-1].ival;
/*set string size*/
@@ -964,12 +964,12 @@ case 77:
is_variable = 0;
} else
H5Tset_size(str_id, str_size);
-
+
/*set string padding and character set*/
H5Tset_strpad(str_id, str_pad);
H5Tset_cset(str_id, str_cset);
- yyval.ival = str_id;
+ yyval.ival = str_id;
}
break;
case 78:
@@ -1016,7 +1016,7 @@ case 91:
#line 329 "H5LTparse.y"
{
is_enum_memb = 1; /*indicate member of enum*/
- enum_memb_symbol = strdup(yylval.sval);
+ enum_memb_symbol = strdup(yylval.sval);
}
break;
case 92:
@@ -1029,7 +1029,7 @@ case 92:
long_long llong_val=(long_long)yylval.ival;
hid_t super = H5Tget_super(enum_id);
hid_t native = H5Tget_native_type(super, H5T_DIR_ASCEND);
-
+
if(is_enum && is_enum_memb) { /*if it's an enum member*/
/*To handle machines of different endianness*/
if(H5Tequal(native, H5T_NATIVE_SCHAR) || H5Tequal(native, H5T_NATIVE_UCHAR))
@@ -1043,7 +1043,7 @@ case 92:
else if(H5Tequal(native, H5T_NATIVE_LLONG) || H5Tequal(native, H5T_NATIVE_ULLONG))
H5Tenum_insert(enum_id, enum_memb_symbol, &llong_val);
- is_enum_memb = 0;
+ is_enum_memb = 0;
if(enum_memb_symbol) free(enum_memb_symbol);
}
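
The generated parser above turns DDL text into H5Tcreate/H5Tinsert/H5Tarray_create calls (compound member offsets, array dimensions, opaque sizes, string padding, enum members). A short sketch of the public entry point that drives it, using the same compound DDL string that hl/test/test_lite.c in this patch feeds it; everything else is illustrative.

#include "hdf5.h"
#include "hdf5_hl.h"

int main(void)
{
    /* Compound type with explicit member offsets, in the DDL the parser accepts */
    const char *ddl =
        "H5T_COMPOUND { H5T_STD_I16BE \"one_field\" : 2; "
        "H5T_STD_U8LE \"two_field\" : 6; }";
    hid_t dtype = H5LTtext_to_dtype(ddl, H5LT_DDL);

    if (dtype < 0 || H5Tget_class(dtype) != H5T_COMPOUND)
        return 1;

    H5Tclose(dtype);
    return 0;
}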
diff --git a/hl/src/H5LTprivate.h b/hl/src/H5LTprivate.h
index 426c223..6e54c54 100644
--- a/hl/src/H5LTprivate.h
+++ b/hl/src/H5LTprivate.h
@@ -19,7 +19,7 @@
#include "H5HLprivate2.h"
/* public LT prototypes */
-#include "H5LTpublic.h"
+#include "H5LTpublic.h"
/*-------------------------------------------------------------------------
* Private functions
@@ -49,7 +49,7 @@ H5_HLDLL herr_t H5LT_set_attribute_string( hid_t dset_id,
H5_HLDLL herr_t H5LT_find_attribute( hid_t loc_id, const char *name );
-H5_HLDLL herr_t H5LT_dtype_to_text(hid_t dtype, char **dt_str, H5LT_lang_t lang,
+H5_HLDLL herr_t H5LT_dtype_to_text(hid_t dtype, char **dt_str, H5LT_lang_t lang,
size_t *slen, hbool_t no_user_buf);
diff --git a/hl/src/H5PTprivate.h b/hl/src/H5PTprivate.h
index 3bba9d2..4c36813 100644
--- a/hl/src/H5PTprivate.h
+++ b/hl/src/H5PTprivate.h
@@ -19,7 +19,7 @@
#include "H5HLprivate2.h"
/* public LT prototypes */
-#include "H5PTpublic.h"
+#include "H5PTpublic.h"
#endif
diff --git a/hl/src/H5TB.c b/hl/src/H5TB.c
index d9579c3..7cbd5c4 100644
--- a/hl/src/H5TB.c
+++ b/hl/src/H5TB.c
@@ -3686,40 +3686,40 @@ herr_t H5TB_common_append_records( hid_t dataset_id,
hid_t mem_space_id = H5I_BADID;
hsize_t dims[1];
hsize_t mem_dims[1];
-
+
/* Extend the dataset */
dims[0] = nrecords + orig_table_size;
if ( H5Dextend ( dataset_id, dims ) < 0 )
goto out;
-
+
/* Create a simple memory data space */
mem_dims[0]=nrecords;
if ( (mem_space_id = H5Screate_simple( 1, mem_dims, NULL )) < 0 )
goto out;
-
+
/* Get a copy of the new file data space for writing */
if ( (space_id = H5Dget_space( dataset_id )) < 0 )
goto out;
-
+
/* Define a hyperslab in the dataset */
offset[0] = orig_table_size;
count[0] = nrecords;
if ( H5Sselect_hyperslab( space_id, H5S_SELECT_SET, offset, NULL, count, NULL)<0)
goto out;
-
+
/* Write the records */
if ( H5Dwrite( dataset_id, mem_type_id, mem_space_id, space_id, H5P_DEFAULT, data )<0)
goto out;
-
+
/* Terminate access to the dataspace */
if ( H5Sclose( mem_space_id ) < 0 )
goto out;
-
+
if ( H5Sclose( space_id ) < 0 )
goto out;
-
+
return 0;
-
+
out:
H5E_BEGIN_TRY
H5Sclose(mem_space_id);
@@ -3760,38 +3760,38 @@ herr_t H5TB_common_read_records( hid_t dataset_id,
hid_t space_id = H5I_BADID;
hid_t mem_space_id = H5I_BADID;
hsize_t mem_size[1];
-
+
/* Make sure the read request is in bounds */
if ( start + nrecords > table_size )
goto out;
-
+
/* Get the dataspace handle */
if ( (space_id = H5Dget_space( dataset_id )) < 0 )
goto out;
-
+
/* Define a hyperslab in the dataset of the size of the records */
offset[0] = start;
count[0] = nrecords;
if ( H5Sselect_hyperslab( space_id, H5S_SELECT_SET, offset, NULL, count, NULL) < 0 )
goto out;
-
+
/* Create a memory dataspace handle */
mem_size[0] = count[0];
if ((mem_space_id = H5Screate_simple( 1, mem_size, NULL )) < 0 )
goto out;
if ((H5Dread( dataset_id, mem_type_id, mem_space_id, space_id, H5P_DEFAULT, data))<0)
goto out;
-
+
/* Terminate access to the memory dataspace */
if ( H5Sclose( mem_space_id ) < 0 )
goto out;
-
+
/* Terminate access to the dataspace */
if ( H5Sclose( space_id ) < 0 )
goto out;
-
+
return 0;
-
+
out:
H5E_BEGIN_TRY
H5Sclose(space_id);
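
H5TB_common_append_records above follows the usual extend-then-hyperslab append pattern: grow the dataset, build a memory dataspace for the new records, select the tail of the file dataspace, and write. A compilable sketch of that pattern for a generic 1-D dataset; the helper name and its parameters are hypothetical, not part of the H5TB API, and the dataset is assumed to have been created extendible (chunked, unlimited maxdims).

#include "hdf5.h"

/* Append nrecords elements of mem_type to a 1-D dataset that currently
 * holds old_size elements. Returns 0 on success, -1 on failure. */
static int append_1d(hid_t dset, hid_t mem_type, hsize_t old_size,
                     hsize_t nrecords, const void *data)
{
    hsize_t dims[1]   = { old_size + nrecords };
    hsize_t mdims[1]  = { nrecords };
    hsize_t offset[1] = { old_size };
    hsize_t count[1]  = { nrecords };
    hid_t   mspace = -1, fspace = -1;
    int     ret = -1;

    if (H5Dextend(dset, dims) < 0)                        goto out;
    if ((mspace = H5Screate_simple(1, mdims, NULL)) < 0)  goto out;
    if ((fspace = H5Dget_space(dset)) < 0)                goto out;
    if (H5Sselect_hyperslab(fspace, H5S_SELECT_SET, offset, NULL, count, NULL) < 0)
        goto out;
    if (H5Dwrite(dset, mem_type, mspace, fspace, H5P_DEFAULT, data) < 0)
        goto out;
    ret = 0;
out:
    if (mspace >= 0) H5Sclose(mspace);
    if (fspace >= 0) H5Sclose(fspace);
    return ret;
}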
diff --git a/hl/test/test_lite.c b/hl/test/test_lite.c
index 07002e0..19d772f 100644
--- a/hl/test/test_lite.c
+++ b/hl/test/test_lite.c
@@ -1057,31 +1057,31 @@ static int test_integers(void)
size_t str_len;
TESTING3("\n text for integer types");
-
+
if((dtype = H5LTtext_to_dtype("H5T_NATIVE_INT\n", H5LT_DDL))<0)
goto out;
if(!H5Tequal(dtype, H5T_NATIVE_INT))
goto out;
if(H5Tclose(dtype)<0)
goto out;
-
+
if((dtype = H5LTtext_to_dtype("H5T_STD_I8BE\n", H5LT_DDL))<0)
goto out;
if(!H5Tequal(dtype, H5T_STD_I8BE))
goto out;
-
+
if(H5LTdtype_to_text(dtype, NULL, H5LT_DDL, &str_len)<0)
- goto out;
+ goto out;
dt_str = (char*)calloc(str_len, sizeof(char));
if(H5LTdtype_to_text(dtype, dt_str, H5LT_DDL, &str_len)<0)
- goto out;
+ goto out;
if(strcmp(dt_str, "H5T_STD_I8BE"))
goto out;
free(dt_str);
if(H5Tclose(dtype)<0)
goto out;
-
+
if((dtype = H5LTtext_to_dtype("H5T_STD_U16LE\n", H5LT_DDL))<0)
goto out;
if(!H5Tequal(dtype, H5T_STD_U16LE))
@@ -1108,31 +1108,31 @@ static int test_fps(void)
size_t str_len;
TESTING3(" text for floating-point types");
-
+
if((dtype = H5LTtext_to_dtype("H5T_NATIVE_LDOUBLE\n", H5LT_DDL))<0)
goto out;
if(!H5Tequal(dtype, H5T_NATIVE_LDOUBLE))
goto out;
if(H5Tclose(dtype)<0)
goto out;
-
+
if((dtype = H5LTtext_to_dtype("H5T_IEEE_F32BE\n", H5LT_DDL))<0)
goto out;
if(!H5Tequal(dtype, H5T_IEEE_F32BE))
goto out;
-
+
if(H5LTdtype_to_text(dtype, NULL, H5LT_DDL, &str_len)<0)
- goto out;
+ goto out;
dt_str = (char*)calloc(str_len, sizeof(char));
if(H5LTdtype_to_text(dtype, dt_str, H5LT_DDL, &str_len)<0)
- goto out;
+ goto out;
if(strcmp(dt_str, "H5T_IEEE_F32BE"))
goto out;
free(dt_str);
if(H5Tclose(dtype)<0)
goto out;
-
+
if((dtype = H5LTtext_to_dtype("H5T_IEEE_F64LE\n", H5LT_DDL))<0)
goto out;
if(!H5Tequal(dtype, H5T_IEEE_F64LE))
@@ -1161,12 +1161,12 @@ static int test_strings(void)
H5T_class_t type_class;
char* dt_str;
size_t str_len;
-
+
TESTING3(" text for string types");
-
+
if((dtype = H5LTtext_to_dtype("H5T_STRING { STRSIZE 13; STRPAD H5T_STR_NULLTERM; CSET H5T_CSET_ASCII; CTYPE H5T_C_S1; }", H5LT_DDL))<0)
goto out;
-
+
if((type_class = H5Tget_class(dtype))<0)
goto out;
if(type_class != H5T_STRING)
@@ -1175,57 +1175,57 @@ static int test_strings(void)
str_size = H5Tget_size(dtype);
if(str_size != 13)
goto out;
-
+
str_pad = H5Tget_strpad(dtype);
if(str_pad != H5T_STR_NULLTERM)
goto out;
-
+
str_cset = H5Tget_cset(dtype);
if(str_cset != H5T_CSET_ASCII)
goto out;
-
+
if(H5LTdtype_to_text(dtype, NULL, H5LT_DDL, &str_len)<0)
- goto out;
+ goto out;
dt_str = (char*)calloc(str_len, sizeof(char));
if(H5LTdtype_to_text(dtype, dt_str, H5LT_DDL, &str_len)<0)
- goto out;
+ goto out;
if(strcmp(dt_str, "H5T_STRING {\n STRSIZE 13;\n STRPAD H5T_STR_NULLTERM;\n CSET H5T_CSET_ASCII;\n CTYPE H5T_C_S1;\n }")) {
printf("dt=\n%s\n", dt_str);
goto out;
}
free(dt_str);
-
+
if(H5Tclose(dtype)<0)
goto out;
-
+
if((dtype = H5LTtext_to_dtype("H5T_STRING { STRSIZE H5T_VARIABLE; STRPAD H5T_STR_NULLPAD; CSET H5T_CSET_ASCII; CTYPE H5T_C_S1; }", H5LT_DDL))<0)
goto out;
-
+
if(!H5Tis_variable_str(dtype))
goto out;
-
+
str_pad = H5Tget_strpad(dtype);
if(str_pad != H5T_STR_NULLPAD)
goto out;
-
+
str_cset = H5Tget_cset(dtype);
if(str_cset != H5T_CSET_ASCII)
goto out;
if(H5LTdtype_to_text(dtype, NULL, H5LT_DDL, &str_len)<0)
- goto out;
+ goto out;
dt_str = (char*)calloc(str_len, sizeof(char));
if(H5LTdtype_to_text(dtype, dt_str, H5LT_DDL, &str_len)<0)
- goto out;
+ goto out;
if(strcmp(dt_str, "H5T_STRING {\n STRSIZE H5T_VARIABLE;\n STRPAD H5T_STR_NULLPAD;\n CSET H5T_CSET_ASCII;\n CTYPE H5T_C_S1;\n }")) {
printf("dt=\n%s\n", dt_str);
goto out;
}
free(dt_str);
-
+
if(H5Tclose(dtype)<0)
goto out;
-
+
PASSED();
return 0;
@@ -1247,25 +1247,25 @@ static int test_opaques(void)
size_t str_len;
TESTING3(" text for opaque types");
-
+
if((dtype = H5LTtext_to_dtype("H5T_OPAQUE { OPQ_SIZE 19; OPQ_TAG \"This is a tag for opaque type\"; }", H5LT_DDL))<0)
goto out;
-
+
if((type_class = H5Tget_class(dtype))<0)
goto out;
if(type_class != H5T_OPAQUE)
goto out;
-
+
if((opq_size = H5Tget_size(dtype)) == 0)
goto out;
if(opq_size != 19)
goto out;
-
+
if(H5LTdtype_to_text(dtype, NULL, H5LT_DDL, &str_len)<0)
- goto out;
+ goto out;
dt_str = (char*)calloc(str_len, sizeof(char));
if(H5LTdtype_to_text(dtype, dt_str, H5LT_DDL, &str_len)<0)
- goto out;
+ goto out;
if(strcmp(dt_str, "H5T_OPAQUE {\n OPQ_SIZE 19;\n OPQ_TAG \"This is a tag for opaque type\";\n }")) {
printf("dt=\n%s\n", dt_str);
goto out;
@@ -1274,7 +1274,7 @@ static int test_opaques(void)
if(H5Tclose(dtype)<0)
goto out;
-
+
PASSED();
return 0;
@@ -1298,12 +1298,12 @@ static int test_enums(void)
H5T_class_t type_class;
char* dt_str;
size_t str_len;
-
+
TESTING3(" text for enum types");
-
+
if((dtype = H5LTtext_to_dtype("H5T_ENUM { H5T_STD_I32LE; \"RED\" 5; \"GREEN\" 6; \"BLUE\" 7; \"WHITE\" 8; }", H5LT_DDL))<0)
goto out;
-
+
if((type_class = H5Tget_class(dtype))<0)
goto out;
if(type_class != H5T_ENUM)
@@ -1312,27 +1312,27 @@ static int test_enums(void)
if(H5Tenum_nameof(dtype, &value1, name1, size)<0)
goto out;
if(strcmp(name1, "BLUE"))
- goto out;
-
+ goto out;
+
if(H5Tenum_valueof(dtype, name2, &value2)<0)
goto out;
if(value2 != 8)
goto out;
-
+
if(H5LTdtype_to_text(dtype, NULL, H5LT_DDL, &str_len)<0)
- goto out;
+ goto out;
dt_str = (char*)calloc(str_len, sizeof(char));
if(H5LTdtype_to_text(dtype, dt_str, H5LT_DDL, &str_len)<0)
- goto out;
+ goto out;
/*if(strcmp(dt_str, "H5T_ENUM {\n H5T_STD_I32LE;\n \"RED\" 5;\n \"GREEN\" 6;\n \"BLUE\" 7;\n \"WHITE\" 8;\n }")) {
printf("dt=\n%s\n", dt_str);
goto out;
}*/
free(dt_str);
-
+
if(H5Tclose(dtype)<0)
goto out;
-
+
PASSED();
return 0;
@@ -1351,34 +1351,34 @@ static int test_variables(void)
H5T_class_t type_class;
char* dt_str;
size_t str_len;
-
+
TESTING3(" text for variable types");
-
+
if((dtype = H5LTtext_to_dtype("H5T_VLEN { H5T_NATIVE_CHAR }\n", H5LT_DDL))<0)
goto out;
-
+
if((type_class = H5Tget_class(dtype))<0)
goto out;
if(type_class != H5T_VLEN)
goto out;
-
+
if(H5Tis_variable_str(dtype))
goto out;
if(H5Tclose(dtype)<0)
goto out;
-
+
if((dtype = H5LTtext_to_dtype("H5T_VLEN { H5T_VLEN { H5T_STD_I32BE } }", H5LT_DDL))<0)
goto out;
-
+
if(H5Tis_variable_str(dtype))
goto out;
if(H5LTdtype_to_text(dtype, NULL, H5LT_DDL, &str_len)<0)
- goto out;
+ goto out;
dt_str = (char*)calloc(str_len, sizeof(char));
if(H5LTdtype_to_text(dtype, dt_str, H5LT_DDL, &str_len)<0)
- goto out;
+ goto out;
if(strcmp(dt_str, "H5T_VLEN {\n H5T_VLEN {\n H5T_STD_I32BE\n }\n }")) {
printf("dt=\n%s\n", dt_str);
goto out;
@@ -1387,7 +1387,7 @@ static int test_variables(void)
if(H5Tclose(dtype)<0)
goto out;
-
+
PASSED();
return 0;
@@ -1410,19 +1410,19 @@ static int test_arrays(void)
size_t str_len;
TESTING3(" text for array types");
-
+
if((dtype = H5LTtext_to_dtype("H5T_ARRAY { [5][7][13] H5T_ARRAY { [17][19] H5T_COMPOUND { H5T_STD_I8BE \"arr_compound_1\"; H5T_STD_I32BE \"arr_compound_2\"; } } }", H5LT_DDL))<0)
goto out;
-
+
if((type_class = H5Tget_class(dtype))<0)
goto out;
if(type_class != H5T_ARRAY)
goto out;
-
+
if((ndims = H5Tget_array_ndims(dtype))<0)
goto out;
if(ndims != 3)
- goto out;
+ goto out;
if(H5Tget_array_dims(dtype, dims, NULL)<0)
goto out;
@@ -1430,10 +1430,10 @@ static int test_arrays(void)
goto out;
if(H5LTdtype_to_text(dtype, NULL, H5LT_DDL, &str_len)<0)
- goto out;
+ goto out;
dt_str = (char*)calloc(str_len, sizeof(char));
if(H5LTdtype_to_text(dtype, dt_str, H5LT_DDL, &str_len)<0)
- goto out;
+ goto out;
/*if(strcmp(dt_str, "H5T_ARRAY { [5][7][13] H5T_ARRAY { [17][19] H5T_COMPOUND { H5T_STD_I8BE \"arr_compound_1\"; H5T_STD_I32BE \"arr_compound_2\"; } } }")) {
printf("dt=\n%s\n", dt_str);
goto out;
@@ -1442,7 +1442,7 @@ static int test_arrays(void)
if(H5Tclose(dtype)<0)
goto out;
-
+
PASSED();
return 0;
@@ -1464,12 +1464,12 @@ static int test_compounds(void)
H5T_class_t type_class;
char* dt_str;
size_t str_len;
-
+
TESTING3(" text for compound types");
-
+
if((dtype = H5LTtext_to_dtype("H5T_COMPOUND { H5T_STD_I16BE \"one_field\" : 2; H5T_STD_U8LE \"two_field\" : 6; }", H5LT_DDL))<0)
goto out;
-
+
if((type_class = H5Tget_class(dtype))<0)
goto out;
if(type_class != H5T_COMPOUND)
@@ -1481,26 +1481,26 @@ static int test_compounds(void)
goto out;
if(H5LTdtype_to_text(dtype, NULL, H5LT_DDL, &str_len)<0)
- goto out;
+ goto out;
dt_str = (char*)calloc(str_len, sizeof(char));
if(H5LTdtype_to_text(dtype, dt_str, H5LT_DDL, &str_len)<0)
- goto out;
+ goto out;
if(strcmp(dt_str, "H5T_COMPOUND {\n H5T_STD_I16BE \"one_field\" : 2;\n H5T_STD_U8LE \"two_field\" : 6;\n }")) {
printf("dt=\n%s\n", dt_str);
goto out;
}
free(dt_str);
-
+
if(H5Tclose(dtype)<0)
goto out;
-
+
if((dtype = H5LTtext_to_dtype("H5T_COMPOUND { H5T_STD_I32BE \"i32_field\"; H5T_STD_I16BE \"i16_field\"; H5T_COMPOUND { H5T_STD_I16BE \"sec_field\"; H5T_COMPOUND { H5T_STD_I32BE \"thd_field\"; } \"grandchild\"; } \"child_compound\"; H5T_STD_I8BE \"i8_field\"; }", H5LT_DDL))<0)
goto out;
-
+
if((memb_name = H5Tget_member_name(dtype, 1)) == NULL)
- goto out;
+ goto out;
if(strcmp(memb_name, "i16_field"))
- goto out;
+ goto out;
free(memb_name);
if((memb_class = H5Tget_member_class(dtype, 2))<0)
@@ -1531,7 +1531,7 @@ static int test_complicated_compound(void)
int size = 1024;
char *srcdir = getenv("srcdir"); /* the source directory */
char filename[1024]="";
-
+
TESTING3(" text for complicated compound types");
/* compose the name of the file to open, using the srcdir, if appropriate */
@@ -1550,8 +1550,8 @@ static int test_complicated_compound(void)
goto out;
}
- /* This part reads in the input as a string in a slow manner. GNU C
- * Library has convenient function getline() but isn't available on
+ /* This part reads in the input as a string in a slow manner. GNU C
+ * Library has convenient function getline() but isn't available on
* all machines.
*/
if((line = (char*)calloc(size, sizeof(char)))==NULL)
@@ -1574,13 +1574,13 @@ static int test_complicated_compound(void)
if((dtype = H5LTtext_to_dtype(line, H5LT_DDL))<0)
goto out;
-
+
if((type_class = H5Tget_class(dtype))<0)
goto out;
if(type_class != H5T_COMPOUND)
goto out;
- /* There should be 101 compound members */
+ /* There should be 101 compound members */
if((nmembs = H5Tget_nmembers(dtype))<0)
goto out;
if(nmembs != 101)
@@ -1601,7 +1601,7 @@ out:
* test H5LTtext_to_dtype function
*-------------------------------------------------------------------------
*/
-static int test_text_dtype(void)
+static int test_text_dtype(void)
{
TESTING("H5LTtext_to_dtype");
@@ -1613,22 +1613,22 @@ static int test_text_dtype(void)
if(test_strings()<0)
goto out;
-
+
if(test_opaques()<0)
goto out;
-
+
if(test_enums()<0)
goto out;
-
+
if(test_variables()<0)
goto out;
-
+
if(test_arrays()<0)
goto out;
-
+
if(test_compounds()<0)
goto out;
-
+
if(test_complicated_compound()<0)
goto out;
@@ -1651,7 +1651,7 @@ int main( void )
/* test attribute functions */
nerrors += test_attr();
-
+
/* test text-dtype functions */
nerrors += test_text_dtype();
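
test_enums() above round-trips an enum through H5LTtext_to_dtype and then checks it with H5Tenum_nameof/H5Tenum_valueof. A condensed sketch of that check; the DDL string is shortened and, like the original test, it assumes the in-memory int layout matches the enum's H5T_STD_I32LE base (i.e. a little-endian host).

#include <string.h>
#include "hdf5.h"
#include "hdf5_hl.h"

int main(void)
{
    hid_t dtype = H5LTtext_to_dtype(
        "H5T_ENUM { H5T_STD_I32LE; \"RED\" 5; \"GREEN\" 6; \"BLUE\" 7; }",
        H5LT_DDL);
    char name[16];
    int  value = 7;

    if (dtype < 0)
        return 1;

    /* value -> symbol, then symbol -> value */
    if (H5Tenum_nameof(dtype, &value, name, sizeof(name)) < 0) return 1;
    if (strcmp(name, "BLUE") != 0)                             return 1;
    if (H5Tenum_valueof(dtype, "GREEN", &value) < 0)           return 1;
    if (value != 6)                                            return 1;

    H5Tclose(dtype);
    return 0;
}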
diff --git a/perform/benchpar.c b/perform/benchpar.c
index 947a20c..517e4e0 100644
--- a/perform/benchpar.c
+++ b/perform/benchpar.c
@@ -23,15 +23,15 @@
/* Local macros */
-/*
+/*
* HDF Boolean type.
*/
#ifndef FALSE
# define FALSE 0
#endif
#ifndef TRUE
-# define TRUE 1
-#endif
+# define TRUE 1
+#endif
/* defines for type of VFL driver to use */
#define FACC_DEFAULT 0
diff --git a/perform/pio_engine.c b/perform/pio_engine.c
index 8b0a7d0..833eabd 100644
--- a/perform/pio_engine.c
+++ b/perform/pio_engine.c
@@ -652,12 +652,12 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
} /* end if */
/* 2D dataspace */
else {
- /* Build partial buffer derived type for contiguous access */
+ /* Build partial buffer derived type for contiguous access */
mrc = MPI_Type_contiguous((int)buf_size, MPI_BYTE,
&contig_cont);
VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
- /* Build contiguous file's derived type */
+ /* Build contiguous file's derived type */
mrc = MPI_Type_vector((int)blk_size, (int)1, (int)(snbytes/buf_size),
contig_cont, &mpi_cont_type);
VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
@@ -666,12 +666,12 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
mrc = MPI_Type_commit(&mpi_cont_type);
VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_COMMIT");
- /* Build partial buffer derived type for interleaved access */
+ /* Build partial buffer derived type for interleaved access */
mrc = MPI_Type_contiguous((int)blk_size, MPI_BYTE,
&contig_inter);
VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
- /* Build interleaved file's derived type */
+ /* Build interleaved file's derived type */
mrc = MPI_Type_vector((int)buf_size, (int)1, (int)(snbytes/blk_size),
contig_inter, &mpi_inter_type);
VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
@@ -707,7 +707,7 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
h5count[0] = buf_size/blk_size;
} /* end else */
hrc = H5Sselect_hyperslab(h5dset_space_id, H5S_SELECT_SET,
- h5start, h5stride, h5count, h5block);
+ h5start, h5stride, h5count, h5block);
VRFY((hrc >= 0), "H5Sselect_hyperslab");
} /* end if */
else {
@@ -757,7 +757,7 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
h5count[1] = 1;
} /* end else */
hrc = H5Sselect_hyperslab(h5dset_space_id, H5S_SELECT_SET,
- h5start, h5stride, h5count, h5block);
+ h5start, h5stride, h5count, h5block);
VRFY((hrc >= 0), "H5Sselect_hyperslab");
} /* end if */
else {
@@ -959,7 +959,7 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
/* Set the base of user's buffer */
buf_p = (unsigned char *)buffer;
- /* Set the number of bytes to transfer this time */
+ /* Set the number of bytes to transfer this time */
nbytes_toxfer = buf_size*blk_size;
/* Compute file offset */
@@ -995,7 +995,7 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets,
/* Set the base of user's buffer */
buf_p=(unsigned char *)buffer;
- /* Set the number of bytes to transfer this time */
+ /* Set the number of bytes to transfer this time */
nbytes_toxfer=buf_size*blk_size;
/* Compute file offset */
@@ -1522,7 +1522,7 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets,
&contig_cont);
VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
- /* Build contiguous file's derived type */
+ /* Build contiguous file's derived type */
mrc = MPI_Type_vector((int)blk_size, (int)1, (int)(snbytes/buf_size),
contig_cont, &mpi_cont_type);
VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
@@ -1536,7 +1536,7 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets,
&contig_inter);
VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
- /* Build interleaved file's derived type */
+ /* Build interleaved file's derived type */
mrc = MPI_Type_vector((int)buf_size, (int)1, (int)(snbytes/blk_size),
contig_inter, &mpi_inter_type);
VRFY((mrc==MPI_SUCCESS), "MPIO_TYPE_CREATE");
@@ -1572,7 +1572,7 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets,
h5count[0] = buf_size/blk_size;
} /* end else */
hrc = H5Sselect_hyperslab(h5dset_space_id, H5S_SELECT_SET,
- h5start, h5stride, h5count, h5block);
+ h5start, h5stride, h5count, h5block);
VRFY((hrc >= 0), "H5Sselect_hyperslab");
} /* end if */
else {
@@ -1622,7 +1622,7 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets,
h5count[1] = 1;
} /* end else */
hrc = H5Sselect_hyperslab(h5dset_space_id, H5S_SELECT_SET,
- h5start, h5stride, h5count, h5block);
+ h5start, h5stride, h5count, h5block);
VRFY((hrc >= 0), "H5Sselect_hyperslab");
} /* end if */
else {
@@ -1783,7 +1783,7 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets,
/* Set the base of user's buffer */
buf_p = (unsigned char *)buffer;
- /* Set the number of bytes to transfer this time */
+ /* Set the number of bytes to transfer this time */
nbytes_toxfer = buf_size*blk_size;
/* Compute file offset */
@@ -1819,7 +1819,7 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets,
/* Set the base of user's buffer */
buf_p=(unsigned char *)buffer;
- /* Set the number of bytes to transfer this time */
+ /* Set the number of bytes to transfer this time */
nbytes_toxfer=buf_size*blk_size;
/* Compute file offset */
@@ -2303,7 +2303,7 @@ do_fopen(parameters *param, char *fname, file_descr *fd /*out*/, int flags)
} /* end if */
else {
/* Set the file driver to the MPI-I/O driver */
- hrc = H5Pset_fapl_mpio(acc_tpl, pio_comm_g, h5_io_info_g);
+ hrc = H5Pset_fapl_mpio(acc_tpl, pio_comm_g, h5_io_info_g);
if (hrc < 0) {
fprintf(stderr, "HDF5 Property List Set failed\n");
GOTOERROR(FAIL);
@@ -2338,7 +2338,7 @@ do_fopen(parameters *param, char *fname, file_descr *fd /*out*/, int flags)
}
break;
- }
+ }
done:
return ret_code;
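
The pio_engine.c hunks above only adjust comment whitespace around the MPI derived-type construction: a contiguous run of buf_size bytes is wrapped in an MPI_Type_vector of blk_size blocks with a stride of snbytes/buf_size runs. A small sketch of that construction; the helper name is hypothetical, while buf_size, blk_size and snbytes are the names used in the hunks.

#include <mpi.h>

/* Build the "contiguous access" file type described above. */
static MPI_Datatype make_cont_type(int buf_size, int blk_size, long snbytes)
{
    MPI_Datatype contig, filetype;

    MPI_Type_contiguous(buf_size, MPI_BYTE, &contig);
    MPI_Type_vector(blk_size, 1, (int)(snbytes / buf_size), contig, &filetype);
    MPI_Type_commit(&filetype);
    MPI_Type_free(&contig);   /* the committed vector keeps its own reference */
    return filetype;
}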
diff --git a/perform/pio_perf.c b/perform/pio_perf.c
index f19afa7..2bb3a67 100644
--- a/perform/pio_perf.c
+++ b/perform/pio_perf.c
@@ -465,7 +465,7 @@ run_test_loop(struct options *opts)
parms.num_bytes = (off_t)pow((double)(opts->num_bpp*parms.num_procs),2);
if (parms.interleaved)
output_report("Transfer Buffer Size: %ldx%ld bytes, File size: %.2f MBs\n",
- buf_size, opts->blk_size,
+ buf_size, opts->blk_size,
((double)parms.num_dsets * (double)parms.num_bytes)
/ ONE_MB);
else
@@ -669,7 +669,7 @@ run_test(iotype iot, parameters parms, struct options *opts)
get_minmax(&read_close_mm, t);
read_close_mm_table[i] = read_close_mm;
-
+
}
pio_time_destroy(res.timers);
@@ -739,7 +739,7 @@ run_test(iotype iot, parameters parms, struct options *opts)
}
-
+
if (!parms.h5_write_only) {
/* Read statistics */
/* Print the raw data throughput if desired */
@@ -1196,7 +1196,7 @@ report_parameters(struct options *opts)
HDfprintf(output, "2D\n");
else
HDfprintf(output, "1D\n");
-
+
HDfprintf(output, "rank %d: VFL used for HDF5 I/O=", rank);
if(opts->h5_use_mpi_posix)
HDfprintf(output, "MPI-posix driver\n");
diff --git a/perform/pio_standalone.c b/perform/pio_standalone.c
index 33d2ccc..2ad0916 100644
--- a/perform/pio_standalone.c
+++ b/perform/pio_standalone.c
@@ -107,7 +107,7 @@ get_option(int argc, const char **argv, const char *opts, const struct long_opti
opt_opt = argv[opt_ind][sp];
if (opt_opt == ':' || (cp = strchr(opts, opt_opt)) == 0) {
-
+
if (opt_err)
HDfprintf(stderr, "%s: unknown option \"%c\"\n",
argv[0], opt_opt);
diff --git a/perform/pio_timer.c b/perform/pio_timer.c
index a528e5d..3d0cb3e 100644
--- a/perform/pio_timer.c
+++ b/perform/pio_timer.c
@@ -38,11 +38,11 @@
/* global variables */
pio_time *timer_g; /* timer: global for stub functions */
-/*
+/*
* Function: sub_time
* Purpose: Struct two time values, and return the difference, in microseconds
- *
- * Note that the function assumes that a > b
+ *
+ * Note that the function assumes that a > b
* Programmer: Leon Arber, 1/27/06
*/
static double sub_time(struct timeval* a, struct timeval* b)
@@ -145,7 +145,7 @@ set_time(pio_time *pt, timer_type t, int start_stop)
} else {
pt->total_time[t] += MPI_Wtime() - pt->mpi_timer[t];
- pt->mpi_timer[t] = MPI_Wtime();
+ pt->mpi_timer[t] = MPI_Wtime();
/* When we stop the timer for HDF5_GROSS_WRITE_FIXED_DIMS or HDF5_GROSS_READ_FIXED_DIMS
* we compute the time it took to close the file after the last read/write finished */
@@ -165,7 +165,7 @@ set_time(pio_time *pt, timer_type t, int start_stop)
else if(t == HDF5_FINE_READ_FIXED_DIMS)
pt->total_time[HDF5_FILE_READ_OPEN] += sub_time(&(pt->sys_timer[t]), &(pt->sys_timer[HDF5_GROSS_READ_FIXED_DIMS]));
-
+
} else {
struct timeval sys_t;
@@ -183,7 +183,7 @@ set_time(pio_time *pt, timer_type t, int start_stop)
pt->total_time[HDF5_FILE_WRITE_CLOSE] += sub_time(&(pt->sys_timer[t]), &(pt->sys_timer[HDF5_FINE_WRITE_FIXED_DIMS]));
else if(t == HDF5_GROSS_READ_FIXED_DIMS)
pt->total_time[HDF5_FILE_READ_CLOSE] += sub_time(&(pt->sys_timer[t]), &(pt->sys_timer[HDF5_FINE_READ_FIXED_DIMS]));
-
+
}
}
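
sub_time's header comment above says the function differences two struct timeval values (assuming a > b); its body is not part of this hunk. One common way to write such a difference, returning seconds as a double, is sketched below as an assumption; the real helper's unit and name may differ (the comment mentions microseconds).

#include <sys/time.h>

/* Difference of two timevals (a > b), in seconds. */
static double timeval_diff(const struct timeval *a, const struct timeval *b)
{
    return ((double)a->tv_sec + (double)a->tv_usec / 1e6)
         - ((double)b->tv_sec + (double)b->tv_usec / 1e6);
}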
diff --git a/src/H5.c b/src/H5.c
index 0bbe225..7ca10d5 100644
--- a/src/H5.c
+++ b/src/H5.c
@@ -3127,11 +3127,11 @@ H5_trace (const double *returning, const char *func, const char *type, ...)
* Function: HDrand/HDsrand
*
* Purpose: Wrapper function for rand. If rand_r exists on this system,
- * use it.
+ * use it.
*
* Wrapper function for srand. If rand_r is available, it will keep
* track of the seed locally instead of using srand() which modifies
- * global state and can break other programs.
+ * global state and can break other programs.
*
* Return: Success: Random number from 0 to RAND_MAX
*
@@ -3154,7 +3154,7 @@ int HDrand(void)
void HDsrand(unsigned int seed)
{
- g_seed = seed;
+ g_seed = seed;
}
#endif
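
The H5.c hunk above touches the HDrand/HDsrand comment: when rand_r() is available, the wrappers keep the seed in a local variable (g_seed, visible in the hunk) instead of the global state that srand() would mutate. An illustrative shape of that arrangement, not copied from the file:

#include <stdlib.h>

static unsigned int g_seed = 42;   /* local seed, so other code's srand() state is untouched */

int HDrand(void)
{
    return rand_r(&g_seed);        /* POSIX rand_r: reentrant, seed passed explicitly */
}

void HDsrand(unsigned int seed)
{
    g_seed = seed;
}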
diff --git a/src/H5A.c b/src/H5A.c
index abd70b5..43759b8 100644
--- a/src/H5A.c
+++ b/src/H5A.c
@@ -1071,7 +1071,7 @@ H5Aget_create_plist(hid_t attr_id)
/* Set the character encoding on the new property list */
if(H5P_set(new_plist, H5A_CHAR_ENCODING_NAME, &(attr->encoding)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set character encoding")
-
+
ret_value = new_plist_id;
done:
@@ -1700,7 +1700,7 @@ H5A_close(H5A_t *attr)
/* Free temporary buffer */
tmp_buf = H5FL_BLK_FREE(attr_buf, tmp_buf);
} /* end if */
-
+
/* Free dynamicly allocated items */
if(H5A_free(attr) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTRELEASE, FAIL, "can't release attribute info")
diff --git a/src/H5AC.c b/src/H5AC.c
index 798c0b5..cf794c6 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -78,19 +78,19 @@ H5FL_DEFINE_STATIC(H5AC_aux_t);
* structure H5AC_slist_entry_t
*
* The dirty entry list maintained via the d_slist_ptr field of H5AC_aux_t
- * and the cleaned entry list maintained via the c_slist_ptr field of
- * H5AC_aux_t are just lists of the file offsets of the dirty/cleaned
- * entries. Unfortunately, the slist code makes us define a dynamically
- * allocated structure to store these offsets in. This structure serves
+ * and the cleaned entry list maintained via the c_slist_ptr field of
+ * H5AC_aux_t are just lists of the file offsets of the dirty/cleaned
+ * entries. Unfortunately, the slist code makes us define a dynamically
+ * allocated structure to store these offsets in. This structure serves
* that purpose. Its fields are as follows:
*
- * magic: Unsigned 32 bit integer always set to
- * H5AC__H5AC_SLIST_ENTRY_T_MAGIC. This field is used to
+ * magic: Unsigned 32 bit integer always set to
+ * H5AC__H5AC_SLIST_ENTRY_T_MAGIC. This field is used to
* validate pointers to instances of H5AC_slist_entry_t.
- *
+ *
* addr: file offset of a metadata entry. Entries are added to this
* list (if they aren't there already) when they are marked
- * dirty in an unprotect, inserted, or renamed. They are
+ * dirty in an unprotect, inserted, or renamed. They are
* removed when they appear in a clean entries broadcast.
*
****************************************************************************/
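
The field description above implies a struct of roughly the following shape; this is reconstructed from the comment, not copied from the declaration (which is outside this hunk).

#include <stdint.h>
#include "hdf5.h"   /* haddr_t */

typedef struct H5AC_slist_entry_t {
    uint32_t magic;  /* always H5AC__H5AC_SLIST_ENTRY_T_MAGIC; validates pointers */
    haddr_t  addr;   /* file offset of the dirty/cleaned metadata entry */
} H5AC_slist_entry_t;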
@@ -455,9 +455,9 @@ H5AC_term_interface(void)
* through the function.
* JRM - 4/7/05
*
- * Added code allocating and initializing the auxilary
+ * Added code allocating and initializing the auxilary
* structure (an instance of H5AC_aux_t), and linking it
- * to the instance of H5C_t created by H5C_create(). At
+ * to the instance of H5C_t created by H5C_create(). At
* present, the auxilary structure is only used in PHDF5.
*
* JRM - 6/28/05
@@ -465,7 +465,7 @@ H5AC_term_interface(void)
* Added code to set the prefix if required.
*
* JRM - 1/20/06
- *
+ *
* Added code to initialize the new write_done field.
*
* JRM - 5/11/06
@@ -530,17 +530,17 @@ H5AC_create(const H5F_t *f,
if ( (mpi_rank = H5F_mpi_get_rank(f)) < 0 ) {
- HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get mpi rank")
+ HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get mpi rank")
}
if ( (mpi_size = H5F_mpi_get_size(f)) < 0 ) {
- HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get mpi size")
+ HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get mpi size")
}
- /* There is no point in setting up the auxilary structure if size
- * is less than or equal to 1, as there will never be any processes
- * to broadcast the clean lists to.
+ /* There is no point in setting up the auxilary structure if size
+ * is less than or equal to 1, as there will never be any processes
+ * to broadcast the clean lists to.
*/
if ( mpi_size > 1 ) {
@@ -550,13 +550,13 @@ H5AC_create(const H5F_t *f,
"Can't allocate H5AC auxilary structure.")
} else {
-
+
aux_ptr->magic = H5AC__H5AC_AUX_T_MAGIC;
aux_ptr->mpi_comm = mpi_comm;
aux_ptr->mpi_rank = mpi_rank;
aux_ptr->mpi_size = mpi_size;
aux_ptr->write_permitted = FALSE;
- aux_ptr->dirty_bytes_threshold =
+ aux_ptr->dirty_bytes_threshold =
H5AC__DEFAULT_DIRTY_BYTES_THRESHOLD;
aux_ptr->dirty_bytes = 0;
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
@@ -578,28 +578,28 @@ H5AC_create(const H5F_t *f,
}
if ( mpi_rank == 0 ) {
-
- aux_ptr->d_slist_ptr =
+
+ aux_ptr->d_slist_ptr =
H5SL_create(H5SL_TYPE_HADDR,0.5,(size_t)16);
if ( aux_ptr->d_slist_ptr == NULL ) {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL,
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL,
"can't create dirtied entry list.")
}
-
- aux_ptr->c_slist_ptr =
+
+ aux_ptr->c_slist_ptr =
H5SL_create(H5SL_TYPE_HADDR,0.5,(size_t)16);
if ( aux_ptr->c_slist_ptr == NULL ) {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL,
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL,
"can't create cleaned entry list.")
}
}
}
- if ( aux_ptr != NULL ) {
+ if ( aux_ptr != NULL ) {
if ( aux_ptr->mpi_rank == 0 ) {
@@ -661,7 +661,7 @@ H5AC_create(const H5F_t *f,
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
- }
+ }
#ifdef H5_HAVE_PARALLEL
else if ( aux_ptr != NULL ) {
@@ -739,10 +739,10 @@ done:
*
* JRM - 6/7/04
*
- * Added code to free the auxiliary structure and its
+ * Added code to free the auxiliary structure and its
* associated slist if present.
* JRM - 6/28/05
- *
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -857,8 +857,8 @@ done:
*
* Complete re-write. See above for details. -- JRM 5/11/04
*
- * Abstracted the guts of the function to H5C_flush_cache()
- * in H5C.c, and then re-wrote the function as a wrapper for
+ * Abstracted the guts of the function to H5C_flush_cache()
+ * in H5C.c, and then re-wrote the function as a wrapper for
* H5C_flush_cache().
*
* JRM - 6/7/04
@@ -895,9 +895,9 @@ H5AC_flush(H5F_t *f, hid_t dxpl_id, unsigned flags)
if ( aux_ptr != NULL ) {
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d::H5AC_flush: (u/uu/i/iu/r/ru) = %d/%d/%d/%d/%d/%d\n",
- (int)(aux_ptr->mpi_rank),
+ (int)(aux_ptr->mpi_rank),
(int)(aux_ptr->unprotect_dirty_bytes),
(int)(aux_ptr->unprotect_dirty_bytes_updates),
(int)(aux_ptr->insert_dirty_bytes),
@@ -907,7 +907,7 @@ H5AC_flush(H5F_t *f, hid_t dxpl_id, unsigned flags)
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
/* to prevent "messages from the future" we must synchronize all
- * processes before we start the flush. Hence the following
+ * processes before we start the flush. Hence the following
* barrier.
*/
if ( MPI_SUCCESS != (mpi_code = MPI_Barrier(aux_ptr->mpi_comm)) ) {
@@ -959,10 +959,10 @@ H5AC_flush(H5F_t *f, hid_t dxpl_id, unsigned flags)
} /* end if ( aux_ptr != NULL ) */
#endif /* H5_HAVE_PARALLEL */
- status = H5C_flush_cache(f,
- dxpl_id,
- H5AC_noblock_dxpl_id,
- f->shared->cache,
+ status = H5C_flush_cache(f,
+ dxpl_id,
+ H5AC_noblock_dxpl_id,
+ f->shared->cache,
flags);
if ( status < 0 ) {
@@ -997,7 +997,7 @@ done:
* 4/27/06
*
* Modifications:
- *
+ *
* None.
*
*-------------------------------------------------------------------------
@@ -1020,7 +1020,7 @@ H5AC_get_entry_status(H5F_t * f,
FUNC_ENTER_NOAPI(H5AC_get_entry_status, FAIL)
- if ( ( cache_ptr == NULL ) ||
+ if ( ( cache_ptr == NULL ) ||
( cache_ptr->magic != H5C__H5C_T_MAGIC ) ||
( ! H5F_addr_defined(addr) ) ||
( status_ptr == NULL ) ) {
@@ -1041,16 +1041,16 @@ H5AC_get_entry_status(H5F_t * f,
status |= H5AC_ES__IN_CACHE;
- if ( is_dirty )
+ if ( is_dirty )
status |= H5AC_ES__IS_DIRTY;
- if ( is_protected )
+ if ( is_protected )
status |= H5AC_ES__IS_PROTECTED;
- if ( is_pinned )
+ if ( is_pinned )
status |= H5AC_ES__IS_PINNED;
}
-
+
*status_ptr = status;
done:
@@ -1210,7 +1210,7 @@ done:
* Purpose: Mark a pinned entry as dirty. The target entry MUST be
* be pinned, and MUST be unprotected.
*
- * If the entry has changed size, the function updates
+ * If the entry has changed size, the function updates
* data structures for the size change.
*
* Return: Non-negative on success/Negative on failure
@@ -1277,8 +1277,8 @@ H5AC_mark_pinned_entry_dirty(H5F_t * f,
}
#endif /* H5_HAVE_PARALLEL */
- result = H5C_mark_pinned_entry_dirty(cache_ptr,
- thing,
+ result = H5C_mark_pinned_entry_dirty(cache_ptr,
+ thing,
size_changed,
new_size);
if ( result < 0 ) {
@@ -1298,7 +1298,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5AC_mark_pinned_or_protected_entry_dirty
*
- * Purpose: Mark a pinned or protected entry as dirty. The target
+ * Purpose: Mark a pinned or protected entry as dirty. The target
* entry MUST be either pinned, protected, or both.
*
* Unlike H5AC_mark_pinned_entry_dirty(), this function does
@@ -1406,7 +1406,7 @@ done:
* the PHDF5 case. It should have no effect on either the
* serial or FPHSD5 cases.
*
- * Note that this code presumes that the renamed entry will
+ * Note that this code presumes that the renamed entry will
* be present in all caches -- which it must be at present.
* To maintain this invarient, only rename entries immediately
* after you unprotect them.
@@ -1461,7 +1461,7 @@ H5AC_rename(H5F_t *f, const H5AC_class_t *type, haddr_t old_addr, haddr_t new_ad
if ( ( aux_ptr != NULL ) &&
( aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold ) ) {
- result = H5AC_propagate_flushed_and_still_clean_entries_list(f,
+ result = H5AC_propagate_flushed_and_still_clean_entries_list(f,
H5AC_noblock_dxpl_id,
f->shared->cache,
TRUE);
@@ -1525,7 +1525,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5AC_protect
*
- * Purpose: If the target entry is not in the cache, load it. If
+ * Purpose: If the target entry is not in the cache, load it. If
* necessary, attempt to evict one or more entries to keep
* the cache within its maximum size.
*
@@ -1533,8 +1533,8 @@ done:
* to the caller. The caller must call H5AC_unprotect() when
* finished with the entry.
*
- * While it is protected, the entry may not be either evicted
- * or flushed -- nor may it be accessed by another call to
+ * While it is protected, the entry may not be either evicted
+ * or flushed -- nor may it be accessed by another call to
* H5AC_protect. Any attempt to do so will result in a failure.
*
* This comment is a re-write of the original Purpose: section.
@@ -1575,8 +1575,8 @@ done:
* Purpose section above.
*
* JRM - 6/7/04
- * Abstracted the guts of the function to H5C_protect()
- * in H5C.c, and then re-wrote the function as a wrapper for
+ * Abstracted the guts of the function to H5C_protect()
+ * in H5C.c, and then re-wrote the function as a wrapper for
* H5C_protect().
*
*-------------------------------------------------------------------------
@@ -1630,7 +1630,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5AC_unpin_entry()
*
- * Purpose: Unpin a cache entry. The entry must be unprotected at
+ * Purpose: Unpin a cache entry. The entry must be unprotected at
* the time of call, and must be pinned.
*
* Return: Non-negative on success/Negative on failure
@@ -1741,18 +1741,18 @@ done:
*
* JRM - 7/5/05
* Added code to track dirty byte generation, and to trigger
- * clean entry list propagation when it exceeds a user
+ * clean entry list propagation when it exceeds a user
* specified threshold. Note that this code only applies in
* the PHDF5 case. It should have no effect on either the
* serial or FPHSD5 cases.
*
* JRM - 9/8/05
* Added code to track entry size changes. This is necessary
- * as it can effect dirty byte creation counts, thereby
+ * as it can effect dirty byte creation counts, thereby
* throwing the caches out of sync in the PHDF5 case.
*
* JRM - 5/16/06
- * Added code to use the new dirtied field in
+ * Added code to use the new dirtied field in
* H5C_cache_entry_t in the test to see if the entry has
* been dirtied.
*
@@ -1786,7 +1786,7 @@ H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
dirtied = ( ( (flags & H5AC__DIRTIED_FLAG) == H5AC__DIRTIED_FLAG ) ||
( ((H5AC_info_t *)thing)->dirtied ) );
- if ( dirtied ) {
+ if ( dirtied ) {
if ( (type->size)(f, thing, &new_size) < 0 ) {
@@ -1805,7 +1805,7 @@ H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
if ( ( dirtied ) && ( ((H5AC_info_t *)thing)->is_dirty == FALSE ) &&
( NULL != (aux_ptr = f->shared->cache->aux_ptr) ) ) {
- result = H5AC_log_dirtied_entry(f->shared->cache,
+ result = H5AC_log_dirtied_entry(f->shared->cache,
(H5AC_info_t *)thing,
addr,
size_changed,
@@ -1980,7 +1980,7 @@ done:
* Reworked for the addition of struct H5AC_cache_config_t.
*
* JRM - 10/25/05
- * Added support for the new dirty_bytes_threshold field of
+ * Added support for the new dirty_bytes_threshold field of
* both H5AC_cache_config_t and H5AC_aux_t.
*
*-------------------------------------------------------------------------
@@ -1996,21 +1996,21 @@ H5AC_get_cache_auto_resize_config(H5AC_t * cache_ptr,
FUNC_ENTER_NOAPI(H5AC_get_cache_auto_resize_config, FAIL)
- if ( ( cache_ptr == NULL )
+ if ( ( cache_ptr == NULL )
||
#ifdef H5_HAVE_PARALLEL
- ( ( cache_ptr->aux_ptr != NULL )
- &&
- ( ((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic
- !=
- H5AC__H5AC_AUX_T_MAGIC
+ ( ( cache_ptr->aux_ptr != NULL )
+ &&
+ ( ((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic
+ !=
+ H5AC__H5AC_AUX_T_MAGIC
)
- )
+ )
||
#endif /* H5_HAVE_PARALLEL */
- ( config_ptr == NULL )
+ ( config_ptr == NULL )
||
- ( config_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION )
+ ( config_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION )
)
{
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
@@ -2060,7 +2060,7 @@ H5AC_get_cache_auto_resize_config(H5AC_t * cache_ptr,
#ifdef H5_HAVE_PARALLEL
if ( cache_ptr->aux_ptr != NULL ) {
- config_ptr->dirty_bytes_threshold =
+ config_ptr->dirty_bytes_threshold =
((H5AC_aux_t *)(cache_ptr->aux_ptr))->dirty_bytes_threshold;
} else {
@@ -2225,7 +2225,7 @@ done:
* Updated for the addition of H5AC_cache_config_t.
*
* John Mainzer -- 1025/05
- * Added support for the new dirty_bytes_threshold field of
+ * Added support for the new dirty_bytes_threshold field of
* both H5AC_cache_config_t and H5AC_aux_t.
*
*-------------------------------------------------------------------------
@@ -2244,14 +2244,14 @@ H5AC_set_cache_auto_resize_config(H5AC_t * cache_ptr,
if ( ( cache_ptr == NULL )
#ifdef H5_HAVE_PARALLEL
||
- ( ( cache_ptr->aux_ptr != NULL )
- &&
- (
- ((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic
- !=
- H5AC__H5AC_AUX_T_MAGIC
+ ( ( cache_ptr->aux_ptr != NULL )
+ &&
+ (
+ ((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic
+ !=
+ H5AC__H5AC_AUX_T_MAGIC
)
- )
+ )
#endif /* H5_HAVE_PARALLEL */
) {
@@ -2275,16 +2275,16 @@ H5AC_set_cache_auto_resize_config(H5AC_t * cache_ptr,
"config_ptr->rpt_fcn_enabled must be either TRUE or FALSE.")
}
- if (
- (
- config_ptr->dirty_bytes_threshold
- <
+ if (
+ (
+ config_ptr->dirty_bytes_threshold
+ <
H5AC__MIN_DIRTY_BYTES_THRESHOLD
)
||
- (
- config_ptr->dirty_bytes_threshold
- >
+ (
+ config_ptr->dirty_bytes_threshold
+ >
H5AC__MAX_DIRTY_BYTES_THRESHOLD
)
) {
@@ -2311,7 +2311,7 @@ H5AC_set_cache_auto_resize_config(H5AC_t * cache_ptr,
#ifdef H5_HAVE_PARALLEL
if ( cache_ptr->aux_ptr != NULL ) {
- ((H5AC_aux_t *)(cache_ptr->aux_ptr))->dirty_bytes_threshold =
+ ((H5AC_aux_t *)(cache_ptr->aux_ptr))->dirty_bytes_threshold =
config_ptr->dirty_bytes_threshold;
}
#endif /* H5_HAVE_PARALLEL */
@@ -2381,7 +2381,7 @@ H5AC_validate_config(H5AC_cache_config_t * config_ptr)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
"dirty_bytes_threshold too small.")
- } else
+ } else
if ( config_ptr->dirty_bytes_threshold > H5AC__MAX_DIRTY_BYTES_THRESHOLD ) {
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
@@ -2423,7 +2423,7 @@ done:
* list, and also remove any matching entries from the dirtied
* slist.
*
- * This function must only be called by the process with
+ * This function must only be called by the process with
* MPI_rank 0.
*
* Return SUCCEED on success, and FAIL on failure.
@@ -2463,7 +2463,7 @@ H5AC_broadcast_clean_list(H5AC_t * cache_ptr)
HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
HDassert( aux_ptr->mpi_rank == 0 );
HDassert( aux_ptr->c_slist_ptr != NULL );
- HDassert( H5SL_count(aux_ptr->c_slist_ptr) ==
+ HDassert( H5SL_count(aux_ptr->c_slist_ptr) ==
(size_t)(aux_ptr->c_slist_len) );
@@ -2479,7 +2479,7 @@ H5AC_broadcast_clean_list(H5AC_t * cache_ptr)
HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 1", mpi_result)
- }
+ }
if ( num_entries > 0 )
{
@@ -2518,7 +2518,7 @@ H5AC_broadcast_clean_list(H5AC_t * cache_ptr)
i++;
/* now remove the entry from the cleaned entry list */
- if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
+ if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
!= slist_entry_ptr ) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
@@ -2533,17 +2533,17 @@ H5AC_broadcast_clean_list(H5AC_t * cache_ptr)
HDassert( aux_ptr->c_slist_len >= 0 );
- /* and also remove the matching entry from the dirtied list
+ /* and also remove the matching entry from the dirtied list
* if it exists.
*/
- if ( (slist_entry_ptr = H5SL_search(aux_ptr->d_slist_ptr,
+ if ( (slist_entry_ptr = H5SL_search(aux_ptr->d_slist_ptr,
(void *)(&addr))) != NULL ) {
- HDassert( slist_entry_ptr->magic ==
+ HDassert( slist_entry_ptr->magic ==
H5AC__H5AC_SLIST_ENTRY_T_MAGIC );
HDassert( slist_entry_ptr->addr == addr );
- if ( H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr))
+ if ( H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr))
!= slist_entry_ptr ) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
@@ -2562,15 +2562,15 @@ H5AC_broadcast_clean_list(H5AC_t * cache_ptr)
} /* while */
- /* Now broadcast the list of cleaned entries -- if there is one.
+ /* Now broadcast the list of cleaned entries -- if there is one.
*
* The peculiar structure of the following call to MPI_Bcast is
* due to MPI's (?) failure to believe in the MPI_Offset type.
- * Thus the element type is MPI_BYTE, with size equal to the
+ * Thus the element type is MPI_BYTE, with size equal to the
* buf_size computed above.
*/
- mpi_result = MPI_Bcast((void *)buf_ptr, (int)buf_size, MPI_BYTE, 0,
+ mpi_result = MPI_Bcast((void *)buf_ptr, (int)buf_size, MPI_BYTE, 0,
aux_ptr->mpi_comm);
if ( mpi_result != MPI_SUCCESS ) {
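
The comment retouched above explains why the clean-list broadcast sends raw bytes: there is no portable MPI datatype for the address type here, so the array of offsets is broadcast as MPI_BYTE with a byte count. A hypothetical stripped-down helper showing just that call shape (names and error handling are illustrative):

#include <mpi.h>
#include "hdf5.h"   /* haddr_t */

static int bcast_clean_addrs(haddr_t *buf_ptr, int num_entries, MPI_Comm comm)
{
    /* element type MPI_BYTE, count = total size in bytes, as in the call above */
    int buf_size = (int)(sizeof(haddr_t) * (size_t)num_entries);

    return (MPI_Bcast((void *)buf_ptr, buf_size, MPI_BYTE, 0, comm) == MPI_SUCCESS) ? 0 : -1;
}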
@@ -2612,11 +2612,11 @@ done:
* Modifications:
*
* John Mainzer, 9/23/05
- * Rewrote function to return the value of the
+ * Rewrote function to return the value of the
* write_permitted field in aux structure if the structure
- * exists and mpi_rank is 0.
+ * exists and mpi_rank is 0.
*
- * If the aux structure exists, but mpi_rank isn't 0, the
+ * If the aux structure exists, but mpi_rank isn't 0, the
* function now returns FALSE.
*
* In all other cases, the function returns TRUE.
@@ -2679,7 +2679,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5AC_ext_config_2_int_config()
*
- * Purpose: Utility function to translate an instance of
+ * Purpose: Utility function to translate an instance of
* H5AC_cache_config_t to an instance of H5C_auto_size_ctl_t.
*
* Places translation in *int_conf_ptr and returns SUCCEED
@@ -2808,14 +2808,14 @@ H5AC_log_deleted_entry(H5AC_t * cache_ptr,
HDassert( aux_ptr->c_slist_ptr != NULL );
/* if the entry appears in the dirtied entry slist, remove it. */
- if ( (slist_entry_ptr = H5SL_search(aux_ptr->d_slist_ptr,
+ if ( (slist_entry_ptr = H5SL_search(aux_ptr->d_slist_ptr,
(void *)(&addr))) != NULL ) {
- HDassert( slist_entry_ptr->magic ==
+ HDassert( slist_entry_ptr->magic ==
H5AC__H5AC_SLIST_ENTRY_T_MAGIC );
HDassert( slist_entry_ptr->addr == addr );
- if ( H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr))
+ if ( H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr))
!= slist_entry_ptr ) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
@@ -2832,14 +2832,14 @@ H5AC_log_deleted_entry(H5AC_t * cache_ptr,
}
/* if the entry appears in the cleaned entry slist, remove it. */
- if ( (slist_entry_ptr = H5SL_search(aux_ptr->c_slist_ptr,
+ if ( (slist_entry_ptr = H5SL_search(aux_ptr->c_slist_ptr,
(void *)(&addr))) != NULL ) {
- HDassert( slist_entry_ptr->magic ==
+ HDassert( slist_entry_ptr->magic ==
H5AC__H5AC_SLIST_ENTRY_T_MAGIC );
HDassert( slist_entry_ptr->addr == addr );
- if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
+ if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
!= slist_entry_ptr ) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
@@ -2876,7 +2876,7 @@ done:
* If mpi_rank is 0, we must first check to see if the entry
* appears in the dirty entries slist. If it is, do nothing.
* If it isn't, add the size to th dirty_bytes count, add the
- * entry to the dirty entries slist, and remove it from the
+ * entry to the dirty entries slist, and remove it from the
* cleaned list (if it is present there).
*
* Return SUCCEED on success, and FAIL on failure.
@@ -2965,14 +2965,14 @@ H5AC_log_dirtied_entry(H5AC_t * cache_ptr,
/* the entry is dirty. If it exists on the cleaned entries list,
* remove it.
*/
- if ( (slist_entry_ptr = H5SL_search(aux_ptr->c_slist_ptr,
+ if ( (slist_entry_ptr = H5SL_search(aux_ptr->c_slist_ptr,
(void *)(&addr))) != NULL ) {
- HDassert( slist_entry_ptr->magic ==
+ HDassert( slist_entry_ptr->magic ==
H5AC__H5AC_SLIST_ENTRY_T_MAGIC );
HDassert( slist_entry_ptr->addr == addr );
- if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
+ if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
!= slist_entry_ptr ) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
@@ -3010,12 +3010,12 @@ done:
* Function: H5AC_log_flushed_entry()
*
* Purpose: Update the clean entry slist for the flush of an entry --
- * specifically, if the entry has been cleared, remove it
+ * specifically, if the entry has been cleared, remove it
* from both the cleaned and dirtied lists if it is present.
- * Otherwise, if the entry was dirty, insert the indicated
+ * Otherwise, if the entry was dirty, insert the indicated
* entry address in the clean slist if it isn't there already.
*
- * This function is only used in PHDF5, and should only
+ * This function is only used in PHDF5, and should only
* be called for the process with mpi rank 0.
*
* Return SUCCEED on success, and FAIL on failure.
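/* A condensed sketch of the bookkeeping described above (rank 0 only).
 * Error handling, slist-entry allocation/free, and the length counters
 * are omitted, so read this as an outline of the logic rather than the
 * real function body.
 */
if (cleared) {
    /* flushed with the clear-only flag: drop any record of the entry */
    if (H5SL_search(aux_ptr->c_slist_ptr, (void *)(&addr)) != NULL)
        H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr));
    if (H5SL_search(aux_ptr->d_slist_ptr, (void *)(&addr)) != NULL)
        H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr));
} else if (was_dirty &&
           H5SL_search(aux_ptr->c_slist_ptr, (void *)(&addr)) == NULL) {
    /* genuinely written to disk: queue the address for the next
     * clean-list broadcast
     */
    H5SL_insert(aux_ptr->c_slist_ptr, slist_entry_ptr,
                &(slist_entry_ptr->addr));
}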
@@ -3046,8 +3046,8 @@ H5AC_log_flushed_entry_dummy(H5C_t * cache_ptr,
aux_ptr = cache_ptr->aux_ptr;
if ( ( was_dirty ) && ( (flags & H5C__FLUSH_CLEAR_ONLY_FLAG) == 0 ) ) {
-
- HDfprintf(stdout,
+
+ HDfprintf(stdout,
"%d:H5AC_log_flushed_entry(): addr = %d, flags = %x, was_dirty = %d, type_id = %d\n",
(int)(aux_ptr->mpi_rank), (int)addr, flags, (int)was_dirty, type_id);
}
@@ -3091,13 +3091,13 @@ H5AC_log_flushed_entry(H5C_t * cache_ptr,
* cleaned list and the dirtied list.
*/
- if ( (slist_entry_ptr = H5SL_search(aux_ptr->c_slist_ptr,
+ if ( (slist_entry_ptr = H5SL_search(aux_ptr->c_slist_ptr,
(void *)(&addr))) != NULL ) {
HDassert( slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
HDassert( slist_entry_ptr->addr == addr );
- if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
+ if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
!= slist_entry_ptr ) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
@@ -3113,13 +3113,13 @@ H5AC_log_flushed_entry(H5C_t * cache_ptr,
HDassert( aux_ptr->c_slist_len >= 0 );
}
- if ( (slist_entry_ptr = H5SL_search(aux_ptr->d_slist_ptr,
+ if ( (slist_entry_ptr = H5SL_search(aux_ptr->d_slist_ptr,
(void *)(&addr))) != NULL ) {
HDassert( slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
HDassert( slist_entry_ptr->addr == addr );
- if ( H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr))
+ if ( H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr))
!= slist_entry_ptr ) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
@@ -3177,7 +3177,7 @@ done:
* If mpi_rank isn't 0, this simply means adding the size
* of the entry to the dirty_bytes count.
*
- * If mpi_rank is 0, we must also add the entry to the
+ * If mpi_rank is 0, we must also add the entry to the
* dirty entries slist.
*
* Return SUCCEED on success, and FAIL on failure.
@@ -3246,7 +3246,7 @@ H5AC_log_inserted_entry(H5F_t * f,
slist_entry_ptr->magic = H5AC__H5AC_SLIST_ENTRY_T_MAGIC;
slist_entry_ptr->addr = addr;
- if ( H5SL_insert(aux_ptr->d_slist_ptr, slist_entry_ptr,
+ if ( H5SL_insert(aux_ptr->d_slist_ptr, slist_entry_ptr,
&(slist_entry_ptr->addr)) < 0 ) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, \
@@ -3266,7 +3266,7 @@ H5AC_log_inserted_entry(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"Inserted entry in clean slist.")
}
- }
+ }
aux_ptr->dirty_bytes += size;
@@ -3292,14 +3292,14 @@ done:
* WARNING
*
* At present, the way that the rename call is used ensures
- * that the renamed entry is present in all caches by
+ * that the renamed entry is present in all caches by
* renaming in a collective operation and immediately after
* unprotecting the target entry.
*
* This function uses this invariant, and will cause arcane
* failures if it is not met. If maintaining this invariant
* becomes impossible, we will have to rework this function
- * extensively, and likely include a bit of IPC for
+ * extensively, and likely include a bit of IPC for
* synchronization. A better option might be to subsume
* rename in the unprotect operation.
*
@@ -3308,17 +3308,17 @@ done:
*
* For processes with mpi rank other than 0, it simply checks to
* see if the entry was dirty prior to the rename, and adds
- * the entry's size to the dirty bytes count.
+ * the entry's size to the dirty bytes count.
*
* In the process with mpi rank 0, the function first checks
* to see if the entry was dirty prior to the rename. If it
* was, and if the entry doesn't appear in the dirtied list
- * under its old address, it adds the entry's size to the
+ * under its old address, it adds the entry's size to the
* dirty bytes count.
*
- * The rank 0 process then removes any references to the
- * entry under its old address from the cleaned and dirtied
- * lists, and inserts an entry in the dirtied list under the
+ * The rank 0 process then removes any references to the
+ * entry under its old address from the cleaned and dirtied
+ * lists, and inserts an entry in the dirtied list under the
* new address.
*
* Return SUCCEED on success, and FAIL on failure.
@@ -3373,7 +3373,7 @@ H5AC_log_renamed_entry(H5AC_t * cache_ptr,
HDassert( aux_ptr->c_slist_ptr != NULL );
/* if the entry appears in the cleaned entry slist, under its old
- * address, remove it.
+ * address, remove it.
*/
if ( (slist_entry_ptr = H5SL_search(aux_ptr->c_slist_ptr,
(void *)(&old_addr))) != NULL ) {
@@ -3422,9 +3422,9 @@ H5AC_log_renamed_entry(H5AC_t * cache_ptr,
HDassert( aux_ptr->d_slist_len >= 0 );
} else {
-
- /* otherwise, allocate a new entry that is ready
- * for insertion, and increment dirty_bytes.
+
+ /* otherwise, allocate a new entry that is ready
+ * for insertion, and increment dirty_bytes.
*
* Note that the fact that the entry wasn't in the dirtied
* list under its old address implies that it must have
@@ -3489,12 +3489,12 @@ done:
* Function: H5AC_propagate_flushed_and_still_clean_entries_list
*
* Purpose: In PHDF5, only the metadata cache with mpi rank 0 is allowed
- * to write to file. All other metadata caches on processes
+ * to write to file. All other metadata caches on processes
* with rank greater than 0 must retain dirty entries until
* they are notified that the entry is now clean.
*
* This function is the main routine for that procedure.
- * It must be called simultaneously on all processes that
+ * It must be called simultaneously on all processes that
* have the relevant file open. To this end, there must
* be a barrier immediately prior to this call.
*
@@ -3502,21 +3502,21 @@ done:
*
* 1) Dirty byte creation exceeds some user specified value.
*
- * While metadata reads may occur independently, all
+ * While metadata reads may occur independently, all
* operations writing metadata must be collective. Thus
* all metadata caches see the same sequence of operations,
* and therefore the same dirty data creation.
*
* This fact is used to synchronize the caches for purposes
* of propagating the list of flushed and still clean
- * entries, by simply calling this function from all
+ * entries, by simply calling this function from all
* caches whenever some user specified threshold on dirty
* data is exceeded.
*
- * 2) Under direct user control -- this operation must be
+ * 2) Under direct user control -- this operation must be
* collective.
*
- * The operations to be managed by this function are as
+ * The operations to be managed by this function are as
* follows:
*
* For the process with mpi rank 0:
@@ -3525,10 +3525,10 @@ done:
* and then disable writes again.
*
* 2) Load the contents of the flushed and still clean entries
- * list (c_slist_ptr) into a buffer, and broadcast that
+ * list (c_slist_ptr) into a buffer, and broadcast that
* buffer to all the other caches.
*
- * 3) Clear the flushed and still clean entries list
+ * 3) Clear the flushed and still clean entries list
* (c_slist_ptr).
*
*
@@ -3542,7 +3542,7 @@ done:
* For all processes:
*
* 1) Reset the dirtied bytes count to 0.
- *
+ *
* Return: Success: non-negative
*
* Failure: negative
@@ -3560,8 +3560,8 @@ done:
#ifdef H5_HAVE_PARALLEL
herr_t
-H5AC_propagate_flushed_and_still_clean_entries_list(H5F_t * f,
- hid_t dxpl_id,
+H5AC_propagate_flushed_and_still_clean_entries_list(H5F_t * f,
+ hid_t dxpl_id,
H5AC_t * cache_ptr,
hbool_t do_barrier)
{
@@ -3581,10 +3581,10 @@ H5AC_propagate_flushed_and_still_clean_entries_list(H5F_t * f,
HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:H5AC_propagate...:%d: (u/uu/i/iu/r/ru) = %d/%d/%d/%d/%d/%d\n",
- (int)(aux_ptr->mpi_rank),
- (int)(aux_ptr->dirty_bytes_propagations),
+ (int)(aux_ptr->mpi_rank),
+ (int)(aux_ptr->dirty_bytes_propagations),
(int)(aux_ptr->unprotect_dirty_bytes),
(int)(aux_ptr->unprotect_dirty_bytes_updates),
(int)(aux_ptr->insert_dirty_bytes),
@@ -3610,7 +3610,7 @@ H5AC_propagate_flushed_and_still_clean_entries_list(H5F_t * f,
aux_ptr->write_permitted = TRUE;
- result = H5C_flush_to_min_clean(f, dxpl_id, H5AC_noblock_dxpl_id,
+ result = H5C_flush_to_min_clean(f, dxpl_id, H5AC_noblock_dxpl_id,
cache_ptr);
aux_ptr->write_permitted = FALSE;
@@ -3636,7 +3636,7 @@ H5AC_propagate_flushed_and_still_clean_entries_list(H5F_t * f,
} else {
- if ( H5AC_receive_and_apply_clean_list(f, dxpl_id,
+ if ( H5AC_receive_and_apply_clean_list(f, dxpl_id,
H5AC_noblock_dxpl_id,
cache_ptr) < 0 ) {
@@ -3671,7 +3671,7 @@ done:
* Purpose: Receive the list of cleaned entries from process 0,
* and mark the specified entries as clean.
*
- * This function must only be called by the process with
+ * This function must only be called by the process with
* MPI_rank greater than 0.
*
* Return SUCCEED on success, and FAIL on failure.
@@ -3737,7 +3737,7 @@ H5AC_receive_and_apply_clean_list(H5F_t * f,
"memory allocation failed for receive buffer")
}
- haddr_buf_ptr = (haddr_t *)H5MM_malloc(sizeof(haddr_t) *
+ haddr_buf_ptr = (haddr_t *)H5MM_malloc(sizeof(haddr_t) *
(size_t)num_entries);
if ( haddr_buf_ptr == NULL ) {
@@ -3747,15 +3747,15 @@ H5AC_receive_and_apply_clean_list(H5F_t * f,
}
- /* Now receive the list of cleaned entries
+ /* Now receive the list of cleaned entries
*
* The peculiar structure of the following call to MPI_Bcast is
* due to MPI's (?) failure to believe in the MPI_Offset type.
- * Thus the element type is MPI_BYTE, with size equal to the
+ * Thus the element type is MPI_BYTE, with size equal to the
* buf_size computed above.
*/
- mpi_result = MPI_Bcast((void *)MPI_Offset_buf_ptr, (int)buf_size,
+ mpi_result = MPI_Bcast((void *)MPI_Offset_buf_ptr, (int)buf_size,
MPI_BYTE, 0, aux_ptr->mpi_comm);
if ( mpi_result != MPI_SUCCESS ) {
@@ -3795,7 +3795,7 @@ done:
if ( MPI_Offset_buf_ptr != NULL ) {
- MPI_Offset_buf_ptr =
+ MPI_Offset_buf_ptr =
(MPI_Offset *)H5MM_xfree((void *)MPI_Offset_buf_ptr);
}
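/* An illustrative outline of the propagation cycle the functions above
 * implement; error checks, the optional barrier, and the debug counters
 * are omitted, so treat this as a sketch of the control flow only.
 */
if (aux_ptr->mpi_rank == 0) {
    /* flush enough dirty entries to reach the min clean size ... */
    aux_ptr->write_permitted = TRUE;
    H5C_flush_to_min_clean(f, dxpl_id, H5AC_noblock_dxpl_id, cache_ptr);
    aux_ptr->write_permitted = FALSE;

    /* ... then tell the other caches which entries are now clean */
    H5AC_broadcast_clean_list(cache_ptr);
} else {
    /* match rank 0's broadcast and mark the listed entries clean */
    H5AC_receive_and_apply_clean_list(f, dxpl_id, H5AC_noblock_dxpl_id,
                                      cache_ptr);
}

/* every process then starts counting newly dirtied bytes from zero */
aux_ptr->dirty_bytes = 0;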
diff --git a/src/H5ACpkg.h b/src/H5ACpkg.h
index e31f245..fc51ef0 100644
--- a/src/H5ACpkg.h
+++ b/src/H5ACpkg.h
@@ -21,7 +21,7 @@
* Source files outside the H5AC package should include
* H5ACprivate.h instead.
*
- * The one exception to this rule is testpar/t_cache.c. The
+ * The one exception to this rule is testpar/t_cache.c. The
* test code is easier to write if it can look at H5AC_aux_t.
* Indeed, this is the main reason why this file was created.
*/
@@ -51,9 +51,9 @@
#define H5AC_DEBUG_DIRTY_BYTES_CREATION 0
/*-------------------------------------------------------------------------
- * It is a bit difficult to set ranges of allowable values on the
- * dirty_bytes_threshold field of H5AC_aux_t. The following are
- * probably broader than they should be.
+ * It is a bit difficult to set ranges of allowable values on the
+ * dirty_bytes_threshold field of H5AC_aux_t. The following are
+ * probably broader than they should be.
*-------------------------------------------------------------------------
*/
@@ -71,71 +71,71 @@
* are some features of the metadata cache that are specific to it, and which
* therefore do not belong in the more generic H5C cache code.
*
- * In particular, there is the matter of synchronizing writes from the
+ * In particular, there is the matter of synchronizing writes from the
* metadata cache to disk in the PHDF5 case.
*
- * Prior to this update, the presumption was that all metadata caches would
- * write the same data at the same time since all operations modifying
- * metadata must be performed collectively. Given this assumption, it was
- * safe to allow only the writes from process 0 to actually make it to disk,
+ * Prior to this update, the presumption was that all metadata caches would
+ * write the same data at the same time since all operations modifying
+ * metadata must be performed collectively. Given this assumption, it was
+ * safe to allow only the writes from process 0 to actually make it to disk,
* while metadata writes from all other processes were discarded.
*
- * Unfortunately, this presumption is in error as operations that read
- * metadata need not be collective, but can change the location of dirty
- * entries in the metadata cache LRU lists. This can result in the same
- * metadata write operation triggering writes from the metadata caches on
- * some processes, but not all (causing a hang), or in different sets of
- * entries being written from different caches (potentially resulting in
+ * Unfortunately, this presumption is in error as operations that read
+ * metadata need not be collective, but can change the location of dirty
+ * entries in the metadata cache LRU lists. This can result in the same
+ * metadata write operation triggering writes from the metadata caches on
+ * some processes, but not all (causing a hang), or in different sets of
+ * entries being written from different caches (potentially resulting in
* metadata corruption in the file).
*
* To deal with this issue, I decided to apply a paradigm shift to the way
* metadata is written to disk.
*
- * With this set of changes, only the metadata cache on process 0 is able
- * to write metadata to disk, although metadata caches on all other
+ * With this set of changes, only the metadata cache on process 0 is able
+ * to write metadata to disk, although metadata caches on all other
* processes can read metadata from disk as before.
*
* To keep all the other caches from getting plugged up with dirty metadata,
* process 0 periodically broadcasts a list of entries that it has flushed
* since that last notice, and which are currently clean. The other caches
- * mark these entries as clean as well, which allows them to evict the
+ * mark these entries as clean as well, which allows them to evict the
* entries as needed.
*
* One obvious problem in this approach is synchronizing the broadcasts
- * and receptions, as different caches may see different amounts of
- * activity.
+ * and receptions, as different caches may see different amounts of
+ * activity.
*
- * The current solution is for the caches to track the number of bytes
- * of newly generated dirty metadata, and to broadcast and receive
+ * The current solution is for the caches to track the number of bytes
+ * of newly generated dirty metadata, and to broadcast and receive
* whenever this value exceeds some user specified threshold.
*
* Maintaining this count is easy for all processes not on process 0 --
- * all that is necessary is to add the size of the entry to the total
+ * all that is necessary is to add the size of the entry to the total
* whenever there is an insertion, a rename of a previously clean entry,
* or whenever a previously clean entry is marked dirty in an unprotect.
*
* On process 0, we have to be careful not to count dirty bytes twice.
- * If an entry is marked dirty, flushed, and marked dirty again, all
- * within a single reporting period, only the first marking should
- * be added to the dirty bytes generated tally, as that is all that
+ * If an entry is marked dirty, flushed, and marked dirty again, all
+ * within a single reporting period, only the first marking should
+ * be added to the dirty bytes generated tally, as that is all that
* the other processes will see.
*
* At present, this structure exists to maintain the fields needed to
* implement the above scheme, and thus is only used in the parallel
* case. However, other uses may arise in the future.
*
- * Instances of this structure are associated with metadata caches via
- * the aux_ptr field of H5C_t (see H5Cpkg.h). The H5AC code is
+ * Instances of this structure are associated with metadata caches via
+ * the aux_ptr field of H5C_t (see H5Cpkg.h). The H5AC code is
* responsible for allocating, maintaining, and discarding instances
- * of H5AC_aux_t.
+ * of H5AC_aux_t.
*
* The remainder of this header comments documents the individual fields
* of the structure.
*
* JRM - 6/27/05
*
- * magic: Unsigned 32 bit integer always set to
- * H5AC__H5AC_AUX_T_MAGIC. This field is used to validate
+ * magic: Unsigned 32 bit integer always set to
+ * H5AC__H5AC_AUX_T_MAGIC. This field is used to validate
* pointers to instances of H5AC_aux_t.
*
* mpi_comm: MPI communicator associated with the file for which the
@@ -146,14 +146,14 @@
* mpi_size: Number of processes in mpi_comm.
*
* write_permitted: Boolean flag used to control whether the cache
- * is permitted to write to file.
+ * is permitted to write to file.
*
- * dirty_bytes_threshold: Integer field containing the dirty bytes
- * generation threshold. Whenever dirty byte creation
- * exceeds this value, the metadata cache on process 0
+ * dirty_bytes_threshold: Integer field containing the dirty bytes
+ * generation threshold. Whenever dirty byte creation
+ * exceeds this value, the metadata cache on process 0
* broadcasts a list of the entries it has flushed since
* the last broadcast (or since the beginning of execution)
- * and which are currently clean (if they are still in the
+ * and which are currently clean (if they are still in the
* cache)
*
* Similarly, metadata caches on processes other than process
@@ -161,16 +161,16 @@
* the threshold is exceeded.
*
* dirty_bytes: Integer field containing the number of bytes of dirty
- * metadata generated since the beginning of the computation,
- * or (more typically) since the last clean entries list
+ * metadata generated since the beginning of the computation,
+ * or (more typically) since the last clean entries list
* broadcast. This field is reset to zero after each such
* broadcast.
*
- * dirty_bytes_propagations: This field only exists when the
+ * dirty_bytes_propagations: This field only exists when the
* H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE.
*
* It is used to track the number of times the cleaned list
- * has been propagated from process 0 to the other
+ * has been propagated from process 0 to the other
* processes.
*
* unprotect_dirty_bytes: This field only exists when the
@@ -184,7 +184,7 @@
* H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE.
*
* It is used to track the number of times dirty bytes have
- * been created via unprotect operations since the last time
+ * been created via unprotect operations since the last time
* the cleaned list was propagated.
*
* insert_dirty_bytes: This field only exists when the
@@ -198,7 +198,7 @@
* H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE.
*
* It is used to track the number of times dirty bytes have
- * been created via insert operations since the last time
+ * been created via insert operations since the last time
* the cleaned list was propagated.
*
* rename_dirty_bytes: This field only exists when the
@@ -212,7 +212,7 @@
* H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE.
*
* It is used to track the number of times dirty bytes have
- * been created via rename operations since the last time
+ * been created via rename operations since the last time
* the cleaned list was propagated.
*
* d_slist_ptr: Pointer to an instance of H5SL_t used to maintain a list
@@ -231,36 +231,36 @@
* 2) a previously clean entry is renamed, and it does not
* already appear in the dirty entry list, or
*
- * 3) a previously clean entry is unprotected with the
- * dirtied flag set and the entry does not already appear
+ * 3) a previously clean entry is unprotected with the
+ * dirtied flag set and the entry does not already appear
* in the dirty entry list.
*
* Entries are added to the dirty entry list whenever they cause
- * the dirty bytes count to be increased. They are removed
+ * the dirty bytes count to be increased. They are removed
* when they appear in a clean entries broadcast. Note that
* renames must be reflected in the dirty entry list.
*
- * To reiterate, this field is only used on process 0 -- it
+ * To reiterate, this field is only used on process 0 -- it
* should be NULL on all other processes.
*
- * d_slist_len: Integer field containing the number of entries in the
- * dirty entry list. This field should always contain the
+ * d_slist_len: Integer field containing the number of entries in the
+ * dirty entry list. This field should always contain the
* value 0 on all processes other than process 0. It exists
* primarily for sanity checking.
*
- * c_slist_ptr: Pointer to an instance of H5SL_t used to maintain a list
+ * c_slist_ptr: Pointer to an instance of H5SL_t used to maintain a list
* of entries that were dirty, have been flushed
* to disk since the last clean entries broadcast, and are
* still clean. Since only process 0 can write to disk, this
* list only exists on process 0.
*
* In essence, this slist is used to assemble the contents of
- * the next clean entries broadcast. The list is emptied after
+ * the next clean entries broadcast. The list is emptied after
* each broadcast.
- *
+ *
* c_slist_len: Integer field containing the number of entries in the clean
- * entries list (*c_slist_ptr). This field should always
- * contain the value 0 on all processes other than process 0.
+ * entries list (*c_slist_ptr). This field should always
+ * contain the value 0 on all processes other than process 0.
* It exists primarily for sanity checking.
*
* write_done: In the parallel test bed, it is necessary to ensure that
@@ -297,7 +297,7 @@ typedef struct H5AC_aux_t
int32_t dirty_bytes;
-#if H5AC_DEBUG_DIRTY_BYTES_CREATION
+#if H5AC_DEBUG_DIRTY_BYTES_CREATION
int32_t dirty_bytes_propagations;
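/* A sketch of the dirty-byte accounting the fields above support; the
 * helper name is hypothetical, and the real code spreads this logic
 * across the H5AC_log_*_entry() routines shown earlier in this patch.
 */
static void
note_dirty_bytes(H5AC_aux_t *aux_ptr, size_t entry_size)
{
    aux_ptr->dirty_bytes += (int32_t)entry_size;

    /* once the shared threshold is crossed, every process runs the
     * (collective) clean-list propagation and the count is reset
     */
    if (aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold) {
        /* H5AC_propagate_flushed_and_still_clean_entries_list(...) */
    }
}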
diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h
index 6dcd88c..c3433be 100644
--- a/src/H5ACprivate.h
+++ b/src/H5ACprivate.h
@@ -229,7 +229,7 @@ extern hid_t H5AC_ind_dxpl_id;
#define H5AC__FLUSH_MARKED_ENTRIES_FLAG H5C__FLUSH_MARKED_ENTRIES_FLAG
-/* #defines of flags used to report entry status in the
+/* #defines of flags used to report entry status in the
* H5AC_get_entry_status() call.
*/
@@ -261,7 +261,7 @@ H5_DLL herr_t H5AC_mark_pinned_entry_dirty(H5F_t * f,
void * thing,
hbool_t size_changed,
size_t new_size);
-H5_DLL herr_t H5AC_mark_pinned_or_protected_entry_dirty(H5F_t * f,
+H5_DLL herr_t H5AC_mark_pinned_or_protected_entry_dirty(H5F_t * f,
void * thing);
H5_DLL herr_t H5AC_rename(H5F_t *f, const H5AC_class_t *type,
haddr_t old_addr, haddr_t new_addr);
diff --git a/src/H5ACpublic.h b/src/H5ACpublic.h
index 5db8c5a..81d3319 100644
--- a/src/H5ACpublic.h
+++ b/src/H5ACpublic.h
@@ -40,16 +40,16 @@ extern "C" {
* structure H5AC_cache_config_t
*
* H5AC_cache_config_t is a public structure intended for use in public APIs.
- * At least in its initial incarnation, it is basically a copy of struct
- * H5C_auto_size_ctl_t, minus the report_fcn field, and plus the
- * dirty_bytes_threshold field.
+ * At least in its initial incarnation, it is basically a copy of struct
+ * H5C_auto_size_ctl_t, minus the report_fcn field, and plus the
+ * dirty_bytes_threshold field.
*
- * The report_fcn field is omitted, as including it would require us to
+ * The report_fcn field is omitted, as including it would require us to
* make H5C_t structure public.
*
* The dirty_bytes_threshold field does not appear in H5C_auto_size_ctl_t,
* as synchronization between caches on different processes is handled at
- * the H5AC level, not at the level of H5C. Note however that there is
+ * the H5AC level, not at the level of H5C. Note however that there is
* considerable interaction between this value and the other fields in this
* structure.
*
@@ -235,31 +235,31 @@ extern "C" {
* The value of this field must be in the range [0.0, 1.0]. I would
* expect typical values to be in the range of 0.01 to 0.1.
*
- *
+ *
* Parallel Configuration Fields:
*
* In PHDF5, all operations that modify metadata must be executed collectively.
- * We used to think that this was enough to ensure consistency across the
+ * We used to think that this was enough to ensure consistency across the
* metadata caches, but since we allow processes to read metadata individually,
- * the order of dirty entries in the LRU list can vary across processes,
+ * the order of dirty entries in the LRU list can vary across processes,
* which can result in inconsistencies between the caches.
*
- * To prevent this, only the metadata cache on process 0 is allowed to write
+ * To prevent this, only the metadata cache on process 0 is allowed to write
* to file, and then only after synchronizing with the other caches. After
* it writes entries to file, it sends the base addresses of the now clean
* entries to the other caches, so they can mark these entries clean as well.
*
- * The different caches know when to synchronize caches by counting the
+ * The different caches know when to synchronize caches by counting the
* number of bytes of dirty metadata created by the collective operations
- * modifying metadata. Whenever this count exceeds a user specified
- * threshold (see below), process 0 flushes down to its minimum clean size,
+ * modifying metadata. Whenever this count exceeds a user specified
+ * threshold (see below), process 0 flushes down to its minimum clean size,
* and then sends the list of newly cleaned entries to the other caches.
*
- * dirty_bytes_threshold: Threshold of dirty byte creation used to
- * synchronize updates between caches. (See above for outline and
+ * dirty_bytes_threshold: Threshold of dirty byte creation used to
+ * synchronize updates between caches. (See above for outline and
* motivation.)
*
- * This value MUST be consistent across all processes accessing the
+ * This value MUST be consistent across all processes accessing the
* file. This field is ignored unless HDF5 has been compiled for
* parallel.
*
diff --git a/src/H5B2private.h b/src/H5B2private.h
index 537921f..6f90ef6 100644
--- a/src/H5B2private.h
+++ b/src/H5B2private.h
@@ -98,7 +98,7 @@ typedef struct H5B2_class_t {
/*****************************/
/* Library-private Variables */
/*****************************/
-
+
/***************************************/
/* Library-private Function Prototypes */
/***************************************/
diff --git a/src/H5Bprivate.h b/src/H5Bprivate.h
index 246d0c5..c94d1a6 100644
--- a/src/H5Bprivate.h
+++ b/src/H5Bprivate.h
@@ -134,7 +134,7 @@ typedef struct H5B_class_t {
/*****************************/
/* Library-private Variables */
/*****************************/
-
+
/* Declare a free list to manage the H5B_shared_t struct */
H5FL_EXTERN(H5B_shared_t);
diff --git a/src/H5C.c b/src/H5C.c
index 15fc671..dd0d56d 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -201,11 +201,11 @@
* JRM - 1/10/05
*
* - Added the H5C__DLL_UPDATE_FOR_SIZE_CHANGE macro and the associated
- * sanity checking macros. These macros are used to update the size of
+ * sanity checking macros. These macros are used to update the size of
* a DLL when one of its entries changes size.
*
* JRM - 9/8/05
- *
+ *
****************************************************************************/
#if H5C_DO_SANITY_CHECKS
@@ -294,7 +294,7 @@ if ( ( (new_size) > (dll_size) ) || \
#define H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv)
#define H5C__DLL_SC(head_ptr, tail_ptr, len, Size, fv)
#define H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv)
-#define H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size)
+#define H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size)
#define H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size)
#endif /* H5C_DO_SANITY_CHECKS */
@@ -1174,9 +1174,9 @@ if ( ( (cache_ptr) == NULL ) || \
* Macro: H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS
*
* Purpose: For efficiency, we sometimes change the order of flushes --
- * but doing so can confuse the replacement policy. This
- * macro exists to allow us to specify an entry as the
- * most recently touched so we can repair any such
+ * but doing so can confuse the replacement policy. This
+ * macro exists to allow us to specify an entry as the
+ * most recently touched so we can repair any such
* confusion.
*
* At present, we only support the modified LRU policy, so
@@ -1192,7 +1192,7 @@ if ( ( (cache_ptr) == NULL ) || \
*
* JRM -- 3/20/06
* Modified macro to ignore pinned entries. Pinned entries
- * do not appear in the data structures maintained by the
+ * do not appear in the data structures maintained by the
* replacement policy code, and thus this macro has nothing
* to do if called for such an entry.
*
@@ -1831,7 +1831,7 @@ if ( ( (cache_ptr) == NULL ) || \
* JRM - 3/17/06
* Modified macro to do nothing if the entry is pinned.
* In this case, the entry is on the pinned entry list, not
- * in the replacement policy data structures, so there is
+ * in the replacement policy data structures, so there is
* nothing to be done.
*
*-------------------------------------------------------------------------
@@ -1947,7 +1947,7 @@ if ( ( (cache_ptr) == NULL ) || \
* unpin of the specified cache entry.
*
* To do this, unlink the specified entry from the protected
- * entry list, and re-insert it in the data structures used
+ * entry list, and re-insert it in the data structures used
* by the current replacement policy.
*
* At present, we only support the modified LRU policy, so
@@ -2271,7 +2271,7 @@ static herr_t H5C_make_space_in_cache(H5F_t * f,
hbool_t * first_flush_ptr);
#if H5C_DO_EXTREME_SANITY_CHECKS
static herr_t H5C_validate_lru_list(H5C_t * cache_ptr);
-static herr_t H5C_verify_not_in_index(H5C_t * cache_ptr,
+static herr_t H5C_verify_not_in_index(H5C_t * cache_ptr,
H5C_cache_entry_t * entry_ptr);
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -2444,7 +2444,7 @@ done:
* size control data structures.
*
* JRM -- 6/24/05
- * Added support for the new write_permitted field of
+ * Added support for the new write_permitted field of
* the H5C_t structure.
*
* JRM -- 7/5/05
@@ -2683,7 +2683,7 @@ done:
* ageout method of cache size reduction.
*
* JRM -- 1/19/06
- * Updated function to display the new prefix field of
+ * Updated function to display the new prefix field of
* H5C_t in output.
*
*-------------------------------------------------------------------------
@@ -2705,8 +2705,8 @@ H5C_def_auto_resize_rpt_fcn(H5C_t * cache_ptr,
switch ( status )
{
case in_spec:
- HDfprintf(stdout,
- "%sAuto cache resize -- no change. (hit rate = %lf)\n",
+ HDfprintf(stdout,
+ "%sAuto cache resize -- no change. (hit rate = %lf)\n",
cache_ptr->prefix, hit_rate);
break;
@@ -2714,9 +2714,9 @@ H5C_def_auto_resize_rpt_fcn(H5C_t * cache_ptr,
HDassert( hit_rate < (cache_ptr->resize_ctl).lower_hr_threshold );
HDassert( old_max_cache_size < new_max_cache_size );
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
- cache_ptr->prefix, hit_rate,
+ cache_ptr->prefix, hit_rate,
(cache_ptr->resize_ctl).lower_hr_threshold);
HDfprintf(stdout,
@@ -2749,7 +2749,7 @@ H5C_def_auto_resize_rpt_fcn(H5C_t * cache_ptr,
case H5C_decr__age_out:
HDfprintf(stdout,
- "%sAuto cache resize -- decrease by ageout. HR = %lf\n",
+ "%sAuto cache resize -- decrease by ageout. HR = %lf\n",
cache_ptr->prefix, hit_rate);
break;
@@ -2765,7 +2765,7 @@ H5C_def_auto_resize_rpt_fcn(H5C_t * cache_ptr,
default:
HDfprintf(stdout,
- "%sAuto cache resize -- decrease by unknown mode. HR = %lf\n",
+ "%sAuto cache resize -- decrease by unknown mode. HR = %lf\n",
cache_ptr->prefix, hit_rate);
}
@@ -2779,43 +2779,43 @@ H5C_def_auto_resize_rpt_fcn(H5C_t * cache_ptr,
break;
case at_max_size:
- HDfprintf(stdout,
- "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
- cache_ptr->prefix, hit_rate,
+ HDfprintf(stdout,
+ "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
+ cache_ptr->prefix, hit_rate,
(cache_ptr->resize_ctl).lower_hr_threshold);
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s cache already at maximum size so no change.\n",
cache_ptr->prefix);
break;
case at_min_size:
- HDfprintf(stdout,
- "%sAuto cache resize -- hit rate (%lf) -- can't decrease.\n",
+ HDfprintf(stdout,
+ "%sAuto cache resize -- hit rate (%lf) -- can't decrease.\n",
cache_ptr->prefix, hit_rate);
- HDfprintf(stdout, "%s cache already at minimum size.\n",
+ HDfprintf(stdout, "%s cache already at minimum size.\n",
cache_ptr->prefix);
break;
case increase_disabled:
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%sAuto cache resize -- increase disabled -- HR = %lf.",
cache_ptr->prefix, hit_rate);
break;
case decrease_disabled:
- HDfprintf(stdout,
- "%sAuto cache resize -- decrease disabled -- HR = %lf.\n",
+ HDfprintf(stdout,
+ "%sAuto cache resize -- decrease disabled -- HR = %lf.\n",
cache_ptr->prefix, hit_rate);
break;
case not_full:
HDassert( hit_rate < (cache_ptr->resize_ctl).lower_hr_threshold );
- HDfprintf(stdout,
- "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
- cache_ptr->prefix, hit_rate,
+ HDfprintf(stdout,
+ "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
+ cache_ptr->prefix, hit_rate,
(cache_ptr->resize_ctl).lower_hr_threshold);
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s cache not full so no increase in size.\n",
cache_ptr->prefix);
break;
@@ -3004,7 +3004,7 @@ done:
* the marked entries.
*
* JRM -- 10/15/05
- * Added code supporting the new
+ * Added code supporting the new
* H5C__FLUSH_IGNORE_PROTECTED_FLAG. We need this flag, as
* we now use this function to flush large number of entries
* in increasing address order. We do this by marking the
@@ -3012,10 +3012,10 @@ done:
* and then restoring LRU order.
*
* However, it is possible that the cache will contain other,
- * unmarked protected entries, when we make this call. This
+ * unmarked protected entries, when we make this call. This
* new flag allows us to ignore them.
*
- * Note that even with this flag set, it is still an error
+ * Note that even with this flag set, it is still an error
* to try to flush a protected entry.
*
* JRM -- 3/25/065
@@ -3094,9 +3094,9 @@ H5C_flush_cache(H5F_t * f,
node_ptr = H5SL_first(cache_ptr->slist_ptr);
#if H5C_DO_SANITY_CHECKS
- /* H5C_flush_single_entry() now removes dirty entries from the
+ /* H5C_flush_single_entry() now removes dirty entries from the
* slist as it flushes them. Thus for sanity checks we must
- * make note of the initial slist length and size before we
+ * make note of the initial slist length and size before we
* do any flushes.
*/
initial_slist_len = cache_ptr->slist_len;
@@ -3126,8 +3126,8 @@ H5C_flush_cache(H5F_t * f,
if ( entry_ptr->is_protected ) {
- /* we probably have major problems -- but let's flush
- * everything we can before we decide whether to flag
+ /* we probably have major problems -- but let's flush
+ * everything we can before we decide whether to flag
* an error.
*/
tried_to_flush_protected_entry = TRUE;
@@ -3169,7 +3169,7 @@ H5C_flush_cache(H5F_t * f,
HDassert( protected_entries <= cache_ptr->pl_len );
- if ( ( ( cache_ptr->pl_len > 0 ) && ( !ignore_protected ) )
+ if ( ( ( cache_ptr->pl_len > 0 ) && ( !ignore_protected ) )
||
( tried_to_flush_protected_entry ) ) {
@@ -3191,16 +3191,16 @@ done:
* Purpose: Flush dirty entries until the caches min clean size is
* attained.
*
- * This function is used in the implementation of the
- * metadata cache in PHDF5. To avoid "messages from the
- * future", the cache on process 0 can't be allowed to
+ * This function is used in the implementation of the
+ * metadata cache in PHDF5. To avoid "messages from the
+ * future", the cache on process 0 can't be allowed to
* flush entries until the other processes have reached
* the same point in the calculation. If this constraint
* is not met, it is possible that the other processes will
- * read metadata generated at a future point in the
+ * read metadata generated at a future point in the
* computation.
*
- *
+ *
* Return: Non-negative on success/Negative on failure or if
* write is not permitted.
*
@@ -3214,9 +3214,9 @@ done:
* upon return.
*
* Do this by scanning up the dirty LRU list for entries to
- * flush to reach min clean size, setting their flush_marker
- * flags, and recording their addresses in the order
- * encountered.
+ * flush to reach min clean size, setting their flush_marker
+ * flags, and recording their addresses in the order
+ * encountered.
*
* Then call H5C_flush_cache() to flush the marked entries.
*
@@ -3228,13 +3228,13 @@ done:
* This change had the opposite of the desired effect. Let's
* leave it in (albeit commented out for now). If we can't
* find a case where it helps, let's get rid of it.
- *
+ *
*-------------------------------------------------------------------------
*/
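/* An abbreviated sketch of the marked-entry pass described above; the
 * dirty-LRU link and marker field names and the space_needed computation
 * are simplified from memory, so treat this as an outline rather than
 * the exact code.
 */
space_needed = cache_ptr->min_clean_size - cache_ptr->cLRU_list_size;
entry_ptr    = cache_ptr->dLRU_tail_ptr;

while (entry_ptr != NULL && flushed_entries_size < space_needed) {
    /* mark the entry and remember its address, in the order encountered */
    entry_ptr->flush_marker = TRUE;
    flushed_entries_list[flushed_entries_count++] = entry_ptr->addr;
    flushed_entries_size += entry_ptr->size;
    entry_ptr = entry_ptr->aux_prev;        /* walk up the dirty LRU */
}

/* flush only the marked entries, tolerating unrelated protected ones */
H5C_flush_cache(f, primary_dxpl_id, secondary_dxpl_id, cache_ptr,
                H5C__FLUSH_MARKED_ENTRIES_FLAG |
                H5C__FLUSH_IGNORE_PROTECTED_FLAG);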
herr_t
-H5C_flush_to_min_clean(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
+H5C_flush_to_min_clean(H5F_t * f,
+ hid_t primary_dxpl_id,
+ hid_t secondary_dxpl_id,
H5C_t * cache_ptr)
{
herr_t result;
@@ -3277,7 +3277,7 @@ H5C_flush_to_min_clean(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"cache write is not permitted!?!\n");
}
-#if 1 /* original code */
+#if 1 /* original code */
result = H5C_make_space_in_cache(f,
primary_dxpl_id,
secondary_dxpl_id,
@@ -3291,17 +3291,17 @@ H5C_flush_to_min_clean(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"H5C_make_space_in_cache failed.")
}
-#else /* modified code -- commented out for now */
+#else /* modified code -- commented out for now */
if ( cache_ptr->max_cache_size > cache_ptr->index_size ) {
- if ( ((cache_ptr->max_cache_size - cache_ptr->index_size) +
+ if ( ((cache_ptr->max_cache_size - cache_ptr->index_size) +
cache_ptr->cLRU_list_size) >= cache_ptr->min_clean_size ) {
space_needed = 0;
} else {
- space_needed = cache_ptr->min_clean_size -
+ space_needed = cache_ptr->min_clean_size -
((cache_ptr->max_cache_size - cache_ptr->index_size) +
cache_ptr->cLRU_list_size);
}
@@ -3313,7 +3313,7 @@ H5C_flush_to_min_clean(H5F_t * f,
} else {
- space_needed = cache_ptr->min_clean_size -
+ space_needed = cache_ptr->min_clean_size -
cache_ptr->cLRU_list_size;
}
}
@@ -3360,7 +3360,7 @@ H5C_flush_to_min_clean(H5F_t * f,
( flushed_entries_size < space_needed ) ) {
HDfprintf(stdout, "flushed_entries_count = %d <= %d = slist_size\n",
(int)flushed_entries_count, (int)(cache_ptr->slist_size));
- HDfprintf(stdout,
+ HDfprintf(stdout,
"flushed_entries_size = %d < %d = space_needed.\n",
(int)flushed_entries_size, (int)space_needed);
}
@@ -3370,7 +3370,7 @@ H5C_flush_to_min_clean(H5F_t * f,
/* Flush the marked entries */
- result = H5C_flush_cache(f, primary_dxpl_id, secondary_dxpl_id,
+ result = H5C_flush_cache(f, primary_dxpl_id, secondary_dxpl_id,
cache_ptr, H5C__FLUSH_MARKED_ENTRIES_FLAG |
H5C__FLUSH_IGNORE_PROTECTED_FLAG);
@@ -3380,12 +3380,12 @@ H5C_flush_to_min_clean(H5F_t * f,
}
/* Now touch up the LRU list so as to place the flushed entries in
- * the order they would be in if we had flushed them in the
+ * the order they would be in if we had flushed them in the
* order we encountered them in.
*/
i = 0;
- while ( i < flushed_entries_count )
+ while ( i < flushed_entries_count )
{
H5C__SEARCH_INDEX_NO_STATS(cache_ptr, flushed_entries_list[i], \
entry_ptr, FAIL)
@@ -3640,7 +3640,7 @@ H5C_get_entry_status(H5C_t * cache_ptr,
if ( entry_ptr == NULL ) {
- /* the entry doesn't exist in the cache -- report this
+ /* the entry doesn't exist in the cache -- report this
* and quit.
*/
*in_cache_ptr = FALSE;
@@ -3726,7 +3726,7 @@ done:
* is_dirty field of H5C_cache_entry_t into the H5C code.
*
* JRM -- 6/24/05
- * Added support for the new write_permitted field of
+ * Added support for the new write_permitted field of
* the H5C_t structure.
*
* JRM -- 3/16/06
@@ -3734,7 +3734,7 @@ done:
* H5C_cache_entry_t structure.
*
* JRM -- 5/3/06
- * Added initialization for the new dirtied field of the
+ * Added initialization for the new dirtied field of the
* H5C_cache_entry_t structure.
*
*-------------------------------------------------------------------------
@@ -3769,16 +3769,16 @@ H5C_insert_entry(H5F_t * f,
HDassert( H5F_addr_defined(addr) );
HDassert( thing );
-#if H5C_DO_EXTREME_SANITY_CHECKS
+#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_verify_not_in_index(cache_ptr, (H5C_cache_entry_t *)thing) < 0 ) {
-
+
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "thing already in index.\n");
}
#endif /* H5C_DO_SANITY_CHECKS */
-#if H5C_DO_EXTREME_SANITY_CHECKS
+#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
+
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"LRU sanity check failed.\n");
}
@@ -3943,7 +3943,7 @@ H5C_insert_entry(H5F_t * f,
#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
+
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"LRU sanity check failed.\n");
}
@@ -3971,9 +3971,9 @@ done:
* Function: H5C_mark_entries_as_clean
*
* Purpose: When the H5C code is used to implement the metadata caches
- * in PHDF5, only the cache with MPI_rank 0 is allowed to
+ * in PHDF5, only the cache with MPI_rank 0 is allowed to
* actually write entries to disk -- all other caches must
- * retain dirty entries until they are advised that the
+ * retain dirty entries until they are advised that the
* entries are clean.
*
* This function exists to allow the H5C code to receive these
@@ -3981,13 +3981,13 @@ done:
*
* The function receives a list of entry base addresses
* which must refer to dirty entries in the cache. If any
- * of the entries are either clean or don't exist, the
+ * of the entries are either clean or don't exist, the
* function flags an error.
*
* The function scans the list of entries and flushes all
- * those that are currently unprotected with the
+ * those that are currently unprotected with the
* H5C__FLUSH_CLEAR_ONLY_FLAG. Those that are currently
- * protected are flagged for clearing when they are
+ * protected are flagged for clearing when they are
* unprotected.
*
* Return: Non-negative on success/Negative on failure
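/* A compressed outline of the scan described above; the index lookup,
 * sanity checks, and the later LRU pass that actually clears the flagged
 * entries are elided, and ce_array_ptr is an assumed parameter name.
 */
for (i = 0; i < ce_array_len; i++) {
    addr = ce_array_ptr[i];
    /* look addr up in the hash index; it must name a dirty entry */

    if (entry_ptr->is_protected) {
        /* can't touch it now -- clear it when it is unprotected */
        entry_ptr->clear_on_unprotect = TRUE;
    } else {
        /* clear (not write) the entry immediately */
        H5C_flush_single_entry(f, primary_dxpl_id, secondary_dxpl_id,
                               cache_ptr, entry_ptr->type, addr,
                               H5C__FLUSH_CLEAR_ONLY_FLAG,
                               &first_flush, TRUE);
    }
}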
@@ -4048,7 +4048,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
+
HDassert(0);
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"LRU sanity check failed.\n");
@@ -4080,7 +4080,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
+
HDassert(0);
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"LRU sanity check failed.\n");
@@ -4094,7 +4094,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
if ( entry_ptr == NULL ) {
#if H5C_DO_SANITY_CHECKS
- HDfprintf(stdout,
+ HDfprintf(stdout,
"H5C_mark_entries_as_clean: entry[%d] = %ld not in cache.\n",
(int)i,
(long)addr);
@@ -4105,7 +4105,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
} else if ( ! entry_ptr->is_dirty ) {
#if H5C_DO_SANITY_CHECKS
- HDfprintf(stdout,
+ HDfprintf(stdout,
"H5C_mark_entries_as_clean: entry %ld is not dirty!?!\n",
(long)addr);
#endif /* H5C_DO_SANITY_CHECKS */
@@ -4131,7 +4131,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.")
}
}
-#else /* modified code */
+#else /* modified code */
} else {
/* Mark the entry to be cleared on unprotect. We will
* scan the LRU list shortly, and clear all those entries
@@ -4156,12 +4156,12 @@ H5C_mark_entries_as_clean(H5F_t * f,
#endif /* end modified code */
}
#if 1 /* modified code */
- /* Scan through the LRU list from back to front, and flush the
+ /* Scan through the LRU list from back to front, and flush the
* entries whose clear_on_unprotect flags are set. Observe that
* any protected entries will not be on the LRU, and therefore
* will not be flushed at this time.
*/
-
+
entries_cleared = 0;
entries_examined = 0;
initial_list_len = cache_ptr->LRU_list_len;
@@ -4169,7 +4169,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
while ( ( entry_ptr != NULL ) &&
( entries_examined <= initial_list_len ) &&
- ( entries_cleared < ce_array_len ) )
+ ( entries_cleared < ce_array_len ) )
{
if ( entry_ptr->clear_on_unprotect ) {
@@ -4239,13 +4239,13 @@ H5C_mark_entries_as_clean(H5F_t * f,
HDassert( entries_cleared + protected_entries_marked == ce_array_len );
#endif /* H5C_DO_SANITY_CHECKS */
- HDassert( ( entries_cleared == ce_array_len ) ||
+ HDassert( ( entries_cleared == ce_array_len ) ||
( (ce_array_len - entries_cleared) <= cache_ptr->pl_len ) );
#if H5C_DO_SANITY_CHECKS
i = 0;
entry_ptr = cache_ptr->pl_head_ptr;
- while ( entry_ptr != NULL )
+ while ( entry_ptr != NULL )
{
if ( entry_ptr->clear_on_unprotect ) {
@@ -4261,7 +4261,7 @@ done:
#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
+
HDassert(0);
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"LRU sanity check failed.\n");
@@ -4280,7 +4280,7 @@ done:
* Purpose: Mark a pinned entry as dirty. The target entry MUST
* be pinned, and MUST be unprotected.
*
- * If the entry has changed size, the function updates
+ * If the entry has changed size, the function updates
* data structures for the size change.
*
* If the entry is not already dirty, the function places
@@ -4374,16 +4374,16 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C_mark_pinned_or_protected_entry_dirty
*
- * Purpose: Mark a pinned or protected entry as dirty. The target entry
+ * Purpose: Mark a pinned or protected entry as dirty. The target entry
* MUST be either pinned or protected, and MAY be both.
*
* At present, this function does not support size change.
*
- * In the protected case, this call is the functional
+ * In the protected case, this call is the functional
* equivalent of setting the H5C__DIRTIED_FLAG on an unprotect
* call.
*
- * In the pinned but not protected case, if the entry is not
+ * In the pinned but not protected case, if the entry is not
* already dirty, the function marks the entry
* dirty and places it on the skip list.
*
@@ -4498,7 +4498,7 @@ H5C_rename_entry(H5C_t * cache_ptr,
#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
+
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"LRU sanity check failed.\n");
}
@@ -4575,7 +4575,7 @@ done:
#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
+
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"LRU sanity check failed.\n");
}
@@ -4589,7 +4589,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C_pin_protected_entry()
*
- * Purpose: Pin a protected cache entry. The entry must be protected
+ * Purpose: Pin a protected cache entry. The entry must be protected
* at the time of call, and must be unpinned.
*
* Return: Non-negative on success/Negative on failure
@@ -4703,7 +4703,7 @@ done:
* Hand optimizations.
*
* JRM -- 5/3/06
- * Added code to set the new dirtied field in
+ * Added code to set the new dirtied field in
* H5C_cache_entry_t to FALSE prior to return.
*
*-------------------------------------------------------------------------
@@ -4741,7 +4741,7 @@ H5C_protect(H5F_t * f,
#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
+
HDassert(0);
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, \
"LRU sanity check failed.\n");
@@ -4966,7 +4966,7 @@ done:
#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
+
HDassert(0);
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, \
"LRU sanity check failed.\n");
@@ -5273,7 +5273,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C_set_prefix
*
- * Purpose: Set the values of the prefix field of H5C_t. This
+ * Purpose: Set the values of the prefix field of H5C_t. This
* field is used to label some debugging output.
*
* Return: Non-negative on success/Negative on failure
@@ -5374,11 +5374,11 @@ done:
* Updated function for the addition of the hash table.
*
* JRM -- 9/8/05
- * Updated function for the addition of cache entry size
+ * Updated function for the addition of cache entry size
* change statistics.
*
* JRM -- 1/13/06
- * Added code to use the prefix field of H5C_t to allow
+ * Added code to use the prefix field of H5C_t to allow
* tagging of statistics output.
*
* JRM -- 3/21/06
@@ -5502,7 +5502,7 @@ H5C_stats(H5C_t * cache_ptr,
HDfprintf(stdout, "\n");
HDfprintf(stdout,
- "%s hash table insertion / deletions = %ld / %ld\n",
+ "%s hash table insertion / deletions = %ld / %ld\n",
cache_ptr->prefix,
(long)(cache_ptr->total_ht_insertions),
(long)(cache_ptr->total_ht_deletions));
@@ -5550,7 +5550,7 @@ H5C_stats(H5C_t * cache_ptr,
(long)(cache_ptr->max_pel_size),
(long)(cache_ptr->pel_len),
(long)(cache_ptr->max_pel_len));
-
+
HDfprintf(stdout,
"%s current LRU list size / length = %ld / %ld\n",
cache_ptr->prefix,
@@ -5593,7 +5593,7 @@ H5C_stats(H5C_t * cache_ptr,
(long)total_size_increases,
(long)total_size_decreases);
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s Total entry pins (dirty) / unpins = %ld (%ld) / %ld\n",
cache_ptr->prefix,
(long)total_pins,
@@ -5663,19 +5663,19 @@ H5C_stats(H5C_t * cache_ptr,
(long)(cache_ptr->insertions[i]),
(long)(cache_ptr->renames[i]));
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s size increases / decreases = %ld / %ld\n",
cache_ptr->prefix,
(long)(cache_ptr->size_increases[i]),
(long)(cache_ptr->size_decreases[i]));
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s entry pins / unpins = %ld / %ld\n",
cache_ptr->prefix,
(long)(cache_ptr->pins[i]),
(long)(cache_ptr->unpins[i]));
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s entry dirty pins/pin'd flushes = %ld / %ld\n",
cache_ptr->prefix,
(long)(cache_ptr->dirty_pins[i]),
@@ -5812,7 +5812,7 @@ H5C_stats__reset(H5C_t * cache_ptr)
/*-------------------------------------------------------------------------
* Function: H5C_unpin_entry()
*
- * Purpose: Unpin a cache entry. The entry must be unprotected at
+ * Purpose: Unpin a cache entry. The entry must be unprotected at
* the time of call, and must be pinned.
*
* Return: Non-negative on success/Negative on failure
@@ -5945,7 +5945,7 @@ done:
* the new H5C__PIN_ENTRY_FLAG and H5C__UNPIN_ENTRY_FLAG flags.
*
* JRM -- 5/3/06
- * Added code to make use of the new dirtied field in
+ * Added code to make use of the new dirtied field in
* H5C_cache_entry_t. If this field is TRUE, it is the
* equivalent of setting the H5C__DIRTIED_FLAG.
*
@@ -6002,27 +6002,27 @@ H5C_unprotect(H5F_t * f,
HDassert( entry_ptr->addr == addr );
HDassert( entry_ptr->type == type );
- /* also set the dirtied variable if the dirtied field is set in
+ /* also set the dirtied variable if the dirtied field is set in
* the entry.
*/
dirtied |= entry_ptr->dirtied;
#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
+
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"LRU sanity check failed.\n");
}
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
#ifdef H5_HAVE_PARALLEL
- /* When the H5C code is used to implement the metadata cache in the
+ /* When the H5C code is used to implement the metadata cache in the
* PHDF5 case, only the cache on process 0 is allowed to write to file.
* All the other metadata caches must hold dirty entries until they
- * are told that the entries are clean.
+ * are told that the entries are clean.
*
- * The clear_on_unprotect flag in the H5C_cache_entry_t structure
- * exists to deal with the case in which an entry is protected when
+ * The clear_on_unprotect flag in the H5C_cache_entry_t structure
+ * exists to deal with the case in which an entry is protected when
* its cache receives word that the entry is now clean. In this case,
* the clear_on_unprotect flag is set, and the entry is flushed with
* the H5C__FLUSH_CLEAR_ONLY_FLAG.
@@ -6198,14 +6198,14 @@ H5C_unprotect(H5F_t * f,
"hash table contains multiple entries for addr?!?.")
}
- if ( H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
+ if ( H5C_flush_single_entry(f,
+ primary_dxpl_id,
+ secondary_dxpl_id,
cache_ptr,
- type,
- addr,
+ type,
+ addr,
H5C__FLUSH_CLEAR_ONLY_FLAG,
- &dummy_first_flush,
+ &dummy_first_flush,
TRUE) < 0 ) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't clear.")
@@ -6219,7 +6219,7 @@ done:
#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
+
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"LRU sanity check failed.\n");
}
@@ -7538,17 +7538,17 @@ done:
*
* If the cache contains protected entries, the function will
* fail, as protected entries cannot be either flushed or
- * destroyed. However all unprotected entries should be
+ * destroyed. However all unprotected entries should be
* flushed and destroyed before the function returns failure.
*
* While pinned entries can usually be flushed, they cannot
* be destroyed. However, they should be unpinned when all
* the entries that reference them have been destroyed (thus
* reducing the pinned entry's reference count to 0, allowing
- * it to be unpinned).
+ * it to be unpinned).
*
- * If pinned entries are present, the function makes repeated
- * passes through the cache, flushing all dirty entries
+ * If pinned entries are present, the function makes repeated
+ * passes through the cache, flushing all dirty entries
* (including the pinned dirty entries where permitted) and
* destroying all unpinned entries. This process is repeated
* until either the cache is empty, or the number of pinned
@@ -7625,7 +7625,7 @@ H5C_flush_invalidate_cache(H5F_t * f,
cur_pel_len = cache_ptr->pel_len;
old_pel_len = cache_ptr->pel_len;
- while ( ( first_pass ) ||
+ while ( ( first_pass ) ||
( ( cur_pel_len < old_pel_len ) && ( protected_entries == 0 ) ) )
{
have_pinned_entries = ( cur_pel_len > 0 );
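/* Skeleton of the repeated-pass loop the comments above describe; the
 * per-pass work is reduced to placeholder comments, so this only shows
 * the termination logic.
 */
while (first_pass ||
       ((cur_pel_len < old_pel_len) && (protected_entries == 0))) {

    /* pass over the slist: flush/destroy the dirty, unpinned entries  */
    /* pass over the hash table: destroy the remaining (clean) entries */

    old_pel_len = cur_pel_len;
    cur_pel_len = cache_ptr->pel_len;   /* did destroying entries unpin any? */
    first_pass  = FALSE;
}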
@@ -7642,9 +7642,9 @@ H5C_flush_invalidate_cache(H5F_t * f,
node_ptr = H5SL_first(cache_ptr->slist_ptr);
#if H5C_DO_SANITY_CHECKS
- /* Depending on circumstances, H5C_flush_single_entry() will
- * remove dirty entries from the slist as it flushes them.
- * Thus for sanity checks we must make note of the initial
+ /* Depending on circumstances, H5C_flush_single_entry() will
+ * remove dirty entries from the slist as it flushes them.
+ * Thus for sanity checks we must make note of the initial
* slist length and size before we do any flushes.
*/
initial_slist_len = cache_ptr->slist_len;
@@ -7657,13 +7657,13 @@ H5C_flush_invalidate_cache(H5F_t * f,
{
/* Note that we now remove nodes from the slist as we flush
* the associated entries, instead of leaving them there
- * until we are done, and then destroying all nodes in
+ * until we are done, and then destroying all nodes in
* the slist.
*
* While this optimization is still easy if everything works,
- * the addition of pinned entries and multiple passes
+ * the addition of pinned entries and multiple passes
* through the cache to allow entries to unpin themselves
- * complicates error recovery greatly.
+ * complicates error recovery greatly.
*
* Given these complications, I've decided to omit this
* optimization for now. It can be re-implemented
@@ -7687,7 +7687,7 @@ H5C_flush_invalidate_cache(H5F_t * f,
if ( entry_ptr->is_protected ) {
- /* we have major problems -- but let's flush
+ /* we have major problems -- but let's flush
* everything we can before we flag an error.
*/
protected_entries++;
@@ -7728,7 +7728,7 @@ H5C_flush_invalidate_cache(H5F_t * f,
cache_ptr,
NULL,
entry_ptr->addr,
- (cooked_flags |
+ (cooked_flags |
H5C__FLUSH_INVALIDATE_FLAG),
&first_flush,
TRUE);
@@ -7749,8 +7749,8 @@ H5C_flush_invalidate_cache(H5F_t * f,
#endif /* H5C_DO_SANITY_CHECKS */
/* Since we are doing a destroy, we must make a pass through
- * the hash table and try to flush - destroy all entries that
- * remain. Note that all remaining entries must be
+ * the hash table and try to flush - destroy all entries that
+ * remain. Note that all remaining entries must be
* clean, so this will not result in any writes to disk.
*/
for ( i = 0; i < H5C__HASH_TABLE_LEN; i++ )
@@ -7784,7 +7784,7 @@ H5C_flush_invalidate_cache(H5F_t * f,
cache_ptr,
NULL,
entry_ptr->addr,
- (cooked_flags |
+ (cooked_flags |
H5C__FLUSH_INVALIDATE_FLAG),
&first_flush,
TRUE);
@@ -7797,8 +7797,8 @@ H5C_flush_invalidate_cache(H5F_t * f,
"Clean entry flush destroy failed.")
}
}
- /* We can't do anything if the entry is pinned. The
- * hope is that the entry will be unpinned as the
+ /* We can't do anything if the entry is pinned. The
+ * hope is that the entry will be unpinned as the
* result of destroys of entries that reference it.
*
* We detect this by noting the change in the number
@@ -7811,7 +7811,7 @@ H5C_flush_invalidate_cache(H5F_t * f,
HDassert( protected_entries == cache_ptr->pl_len );
old_pel_len = cur_pel_len;
- cur_pel_len = cache_ptr->pel_len;
+ cur_pel_len = cache_ptr->pel_len;
first_pass = FALSE;
@@ -7898,9 +7898,9 @@ done:
* respectively.
*
* JRM -- 6/24/05
- * Added code to remove dirty entries from the slist after
- * they have been flushed. Also added a sanity check that
- * will scream if we attempt a write when writes are
+ * Added code to remove dirty entries from the slist after
+ * they have been flushed. Also added a sanity check that
+ * will scream if we attempt a write when writes are
* completely disabled.
*
* JRM -- 7/5/05
@@ -8233,7 +8233,7 @@ H5C_flush_single_entry(H5F_t * f,
if ( cache_ptr->log_flush ) {
- status = (cache_ptr->log_flush)(cache_ptr, addr, was_dirty,
+ status = (cache_ptr->log_flush)(cache_ptr, addr, was_dirty,
flags, type_id);
if ( status < 0 ) {
@@ -8491,7 +8491,7 @@ H5C_make_space_in_cache(H5F_t * f,
entry_ptr = cache_ptr->dLRU_tail_ptr;
if ( cache_ptr->index_size < cache_ptr->max_cache_size ) {
-
+
empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
} else {
@@ -8499,7 +8499,7 @@ H5C_make_space_in_cache(H5F_t * f,
empty_space = 0;
}
- while ( ( (cache_ptr->cLRU_list_size + empty_space)
+ while ( ( (cache_ptr->cLRU_list_size + empty_space)
< cache_ptr->min_clean_size ) &&
( entries_examined <= initial_list_len ) &&
( entry_ptr != NULL )
@@ -8587,7 +8587,7 @@ done:
*
* Purpose: Debugging function that scans the LRU list for errors.
*
- * If an error is detected, the function generates a
+ * If an error is detected, the function generates a
* diagnostic and returns FAIL. If no error is detected,
* the function returns SUCCEED.
*
@@ -8615,10 +8615,10 @@ H5C_validate_lru_list(H5C_t * cache_ptr)
HDassert( cache_ptr );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- if ( ( ( cache_ptr->LRU_head_ptr == NULL )
- ||
- ( cache_ptr->LRU_tail_ptr == NULL )
- )
+ if ( ( ( cache_ptr->LRU_head_ptr == NULL )
+ ||
+ ( cache_ptr->LRU_tail_ptr == NULL )
+ )
&&
( cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr )
) {
@@ -8633,7 +8633,7 @@ H5C_validate_lru_list(H5C_t * cache_ptr)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
}
- if ( ( cache_ptr->LRU_list_len == 1 )
+ if ( ( cache_ptr->LRU_list_len == 1 )
&&
( ( cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr )
||
@@ -8688,7 +8688,7 @@ H5C_validate_lru_list(H5C_t * cache_ptr)
entry_ptr = entry_ptr->next;
}
- if ( ( cache_ptr->LRU_list_len != len ) ||
+ if ( ( cache_ptr->LRU_list_len != len ) ||
( cache_ptr->LRU_list_size != size ) ) {
HDfprintf(stdout,"H5C_validate_lru_list: Check 7 failed.\n");
@@ -8717,7 +8717,7 @@ done:
* that the specified instance of H5C_cache_entry_t is not
* present.
*
- * If an error is detected, the function generates a
+ * If an error is detected, the function generates a
* diagnostic and returns FAIL. If no error is detected,
* the function returns SUCCEED.
*
@@ -8756,7 +8756,7 @@ H5C_verify_not_in_index(H5C_t * cache_ptr,
{
if ( scan_ptr == entry_ptr ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"H5C_verify_not_in_index: entry in index (%d/%d)\n",
i, depth);
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
diff --git a/src/H5CS.c b/src/H5CS.c
index 3de842b..79db4ba 100644
--- a/src/H5CS.c
+++ b/src/H5CS.c
@@ -167,10 +167,10 @@ herr_t
H5CS_print(FILE *stream)
{
H5CS_t *fstack = H5CS_get_my_stack (); /* Get the correct function stack */
-
+
/* Don't push this function on the function stack... :-) */
FUNC_ENTER_NOAPI_NOFUNC_NOFS(H5CS_print);
-
+
/* Sanity check */
assert(fstack);
@@ -270,7 +270,7 @@ H5CS_copy_stack(H5CS_t *new_stack)
{
H5CS_t *old_stack = H5CS_get_my_stack ();
unsigned u; /* Local index variable */
-
+
/* Don't push this function on the function stack... :-) */
FUNC_ENTER_NOAPI_NOFUNC_NOFS(H5CS_copy_stack);
@@ -304,7 +304,7 @@ herr_t
H5CS_close_stack(H5CS_t *stack)
{
unsigned u; /* Local index variable */
-
+
/* Don't push this function on the function stack... :-) */
FUNC_ENTER_NOAPI_NOFUNC_NOFS(H5CS_close_stack);
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index 12fcfe5..16c27de 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -79,7 +79,7 @@
*
* JRM - 7/19/04
*
- * The TBBT has since been replaced with a skip list. This change
+ * The TBBT has since been replaced with a skip list. This change
* greatly predates this note.
*
* JRM - 9/26/05
@@ -87,7 +87,7 @@
* magic: Unsigned 32 bit integer always set to H5C__H5C_T_MAGIC. This
* field is used to validate pointers to instances of H5C_t.
*
- * aux_ptr: Pointer to void used to allow wrapper code to associate
+ * aux_ptr: Pointer to void used to allow wrapper code to associate
* its data with an instance of H5C_t. The H5C cache code
* sets this field to NULL, and otherwise leaves it alone.
*
@@ -142,10 +142,10 @@
* the cache uses the following write_permitted field to
* determine whether writes are permitted.
*
- * write_permitted: If check_write_permitted is NULL, this boolean flag
+ * write_permitted: If check_write_permitted is NULL, this boolean flag
* indicates whether writes are permitted.
*
- * log_flush: If provided, this function is called whenever a dirty
+ * log_flush: If provided, this function is called whenever a dirty
* entry is flushed to disk.
*
*
@@ -187,7 +187,7 @@
* on how frequently the cache is flushed. We will see how it goes.
*
* For now at least, I will not remove dirty entries from the list as they
- * are flushed. (this has been changed -- dirty entries are now removed from
+ * are flushed. (this has been changed -- dirty entries are now removed from
* the skip list as they are flushed. JRM - 10/25/05)
*
* slist_len: Number of entries currently in the skip list
@@ -235,22 +235,22 @@
*
* For very frequently used entries, the protect/unprotect overhead can
* become burdensome. To avoid this overhead, I have modified the cache
- * to allow entries to be "pinned". A pinned entry is similar to a
+ * to allow entries to be "pinned". A pinned entry is similar to a
* protected entry, in the sense that it cannot be evicted, and that
* the entry can be modified at any time.
*
* Pinning an entry has the following implications:
*
* 1) A pinned entry cannot be evicted. Thus unprotected
- * pinned entries reside in the pinned entry list, instead
+ * pinned entries reside in the pinned entry list, instead
* of the LRU list(s) (or other lists maintained by the current
* replacement policy code).
- *
+ *
* 2) A pinned entry can be accessed or modified at any time.
* Therefore, the cache must check with the entry owner
* before flushing it. If permission is denied, the
* cache just skips the entry in the flush.
- *
+ *
* 3) A pinned entry can be marked as dirty (and possibly
* change size) while it is unprotected.
*
@@ -263,20 +263,20 @@
*
* Maintaining the pinned entry list requires the following fields:
*
- * pel_len: Number of entries currently residing on the pinned
+ * pel_len: Number of entries currently residing on the pinned
* entry list.
*
* pel_size: Number of bytes of cache entries currently residing on
* the pinned entry list.
*
* pel_head_ptr: Pointer to the head of the doubly linked list of pinned
- * but not protected entries. Note that cache entries on
+ * but not protected entries. Note that cache entries on
* this list are linked by their next and prev fields.
*
* This field is NULL if the list is empty.
*
* pel_tail_ptr: Pointer to the tail of the doubly linked list of pinned
- * but not protected entries. Note that cache entries on
+ * but not protected entries. Note that cache entries on
* this list are linked by their next and prev fields.
*
* This field is NULL if the list is empty.
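As a concrete picture of the pel_* fields documented above, the sketch below appends an entry to a doubly linked pinned-entry list and updates the list length and byte count. The entry and list types are simplified stand-ins, not the real H5C structures, but the head/tail/next/prev bookkeeping is the same idea.

    #include <stddef.h>

    struct entry {                 /* simplified stand-in for a cache entry    */
        size_t        size;
        struct entry *next, *prev;
    };

    struct pel {                   /* simplified pinned entry list bookkeeping */
        struct entry *head, *tail;
        int           len;         /* pel_len:  number of pinned entries       */
        size_t        size;        /* pel_size: bytes of pinned entries        */
    };

    static void pel_append(struct pel *l, struct entry *e)
    {
        e->next = NULL;
        e->prev = l->tail;
        if(l->tail)
            l->tail->next = e;
        else
            l->head = e;           /* list was empty                           */
        l->tail  = e;
        l->len  += 1;
        l->size += e->size;
    }

Removal reverses the same steps, which is why both pel_len and pel_size must be updated on every pin and unpin.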
@@ -568,24 +568,24 @@
* id equal to the array index has been marked dirty while pinned
* in the current epoch.
*
- * pinned_flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
- * cells are used to record the number of times an entry
- * with type id equal to the array index has been flushed while
+ * pinned_flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
+ * cells are used to record the number of times an entry
+ * with type id equal to the array index has been flushed while
* pinned in the current epoch.
*
- * pinned_cleared: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
- * cells are used to record the number of times an entry
- * with type id equal to the array index has been cleared while
+ * pinned_cleared: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
+ * cells are used to record the number of times an entry
+ * with type id equal to the array index has been cleared while
* pinned in the current epoch.
*
*
- * size_increases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
- * The cells are used to record the number of times an entry
+ * size_increases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * The cells are used to record the number of times an entry
* with type id equal to the array index has increased in
* size in the current epoch.
*
- * size_decreases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
- * The cells are used to record the number of times an entry
+ * size_decreases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * The cells are used to record the number of times an entry
* with type id equal to the array index has decreased in
* size in the current epoch.
*
@@ -662,8 +662,8 @@
* the cache in the current epoch.
*
* max_pins: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the maximum number of times that any single
- * entry with type id equal to the array index that has been
+ * are used to record the maximum number of times that any single
+ * entry with type id equal to the array index that has been
* marked as pinned in the cache in the current epoch.
*
*
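The statistics fields described in this header comment are all arrays indexed by an entry's type id. The sketch below shows that bookkeeping pattern with an invented MAX_TYPES constant and event function; the real arrays live in H5C_t and use H5C__MAX_NUM_TYPE_IDS + 1 as their length.

    #include <stdint.h>

    #define MAX_TYPES 16                        /* stand-in for H5C__MAX_NUM_TYPE_IDS + 1 */

    static int64_t pinned_flushes[MAX_TYPES];   /* flushes of pinned entries, per type    */
    static int64_t pinned_cleared[MAX_TYPES];   /* clears of pinned entries, per type     */
    static int64_t size_increases[MAX_TYPES];   /* entry size growth events, per type     */

    static void note_pinned_flush(int type_id)
    {
        if(type_id >= 0 && type_id < MAX_TYPES)
            pinned_flushes[type_id]++;          /* one counter bump per recorded event    */
    }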
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index bdcf501..fd54d69 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -187,8 +187,8 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
*
* In typical application, this structure is the first field in a
* structure to be cached. For historical reasons, the external module
- * is responsible for managing the is_dirty field (this is no longer
- * completely true. See the comment on the is_dirty field for details).
+ * is responsible for managing the is_dirty field (this is no longer
+ * completely true. See the comment on the is_dirty field for details).
* All other fields are managed by the cache.
*
* The fields of this structure are discussed individually below:
@@ -230,23 +230,23 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
*
* Update: Management of the is_dirty field has been largely
* moved into the cache. The only remaining exceptions
- * are the flush and clear functions supplied by the
- * modules using the cache. These still clear the
+ * are the flush and clear functions supplied by the
+ * modules using the cache. These still clear the
* is_dirty field as before. -- JRM 7/5/05
*
* dirtied: Boolean flag used to indicate that the entry has been
* dirtied while protected.
*
* This field is set to FALSE in the protect call, and may
- * be set to TRUE by the
+ * be set to TRUE by the
* H5C_mark_pinned_or_protected_entry_dirty()
* call at any time prior to the unprotect call.
*
- * The H5C_mark_pinned_or_protected_entry_dirty() call exists
+ * The H5C_mark_pinned_or_protected_entry_dirty() call exists
* as a convenience function for the fractal heap code which
* may not know if an entry is protected or pinned, but knows
- * that it is either protected or pinned. The dirtied field was
- * added because, in the parallel case, it is necessary to know
+ * that it is either protected or pinned. The dirtied field was
+ * added because, in the parallel case, it is necessary to know
* whether a protected entry was dirty prior to the protect call.
*
* is_protected: Boolean flag indicating whether this entry is protected
@@ -261,7 +261,7 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
* is_pinned: Boolean flag indicating whether the entry has been pinned
* in the cache.
*
- * For very hot entries, the protect / unprotect overhead
+ * For very hot entries, the protect / unprotect overhead
* can become excessive. Thus the cache has been extended
* to allow an entry to be "pinned" in the cache.
*
@@ -274,14 +274,14 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
*
* 2) A pinned entry can be accessed or modified at any time.
* Therefore, the cache must check with the entry owner
- * before flushing it. If permission is denied, the
+ * before flushing it. If permission is denied, the
* cache does not flush the entry.
*
- * 3) A pinned entry can be marked as dirty (and possibly
+ * 3) A pinned entry can be marked as dirty (and possibly
* change size) while it is unprotected.
*
- * 4) The flush-destroy code must allow pinned entries to
- * be unpinned (and possibly unprotected) during the
+ * 4) The flush-destroy code must allow pinned entries to
+ * be unpinned (and possibly unprotected) during the
* flush.
*
* JRM -- 3/16/06
@@ -301,13 +301,13 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
*
* clear_on_unprotect: Boolean flag used only in PHDF5. When H5C is used
* to implement the metadata cache in the parallel case, only
- * the cache with mpi rank 0 is allowed to actually write to
+ * the cache with mpi rank 0 is allowed to actually write to
* file -- all other caches must retain dirty entries until they
- * are advised that the entry is clean.
+ * are advised that the entry is clean.
*
- * This flag is used in the case that such an advisory is
+ * This flag is used in the case that such an advisory is
* received when the entry is protected. If it is set when an
- * entry is unprotected, and the dirtied flag is not set in
+ * entry is unprotected, and the dirtied flag is not set in
* the unprotect, the entry's is_dirty flag is reset by flushing
* it with the H5C__FLUSH_CLEAR_ONLY_FLAG.
*
@@ -406,7 +406,7 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
* been flushed to file in its life time.
*
* pins: int32_t containing the number of times this cache entry has
- * been pinned in cache in its life time.
+ * been pinned in cache in its life time.
*
****************************************************************************/
@@ -416,7 +416,7 @@ typedef struct H5C_cache_entry_t
size_t size;
const H5C_class_t * type;
hbool_t is_dirty;
- hbool_t dirtied;
+ hbool_t dirtied;
hbool_t is_protected;
hbool_t is_pinned;
hbool_t in_slist;
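The header comment notes that in typical use this structure is the first field of the structure being cached. A hypothetical client structure (not one of the real HDF5 clients) would therefore look like the sketch below; putting the header first lets the cache treat a pointer to the client object and a pointer to its cache entry interchangeably.

    #include "H5Cprivate.h"             /* H5C_cache_entry_t (HDF5 internal header)  */

    /* Hypothetical cached object, for illustration only. */
    typedef struct my_cached_thing_t {
        H5C_cache_entry_t cache_info;   /* managed by the metadata cache             */
        int               payload;      /* client data managed by the owning module  */
    } my_cached_thing_t;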
diff --git a/src/H5D.c b/src/H5D.c
index 5c4591d..0fc4168 100644
--- a/src/H5D.c
+++ b/src/H5D.c
@@ -196,7 +196,7 @@ H5D_init_interface(void)
size_t def_hyp_vec_size = H5D_XFER_HYPER_VECTOR_SIZE_DEF;
#ifdef H5_HAVE_PARALLEL
H5FD_mpio_xfer_t def_io_xfer_mode = H5D_XFER_IO_XFER_MODE_DEF;
- H5FD_mpio_chunk_opt_t def_mpio_chunk_opt_mode = H5D_XFER_MPIO_CHUNK_OPT_HARD_DEF;
+ H5FD_mpio_chunk_opt_t def_mpio_chunk_opt_mode = H5D_XFER_MPIO_CHUNK_OPT_HARD_DEF;
unsigned def_mpio_chunk_opt_num = H5D_XFER_MPIO_CHUNK_OPT_NUM_DEF;
unsigned def_mpio_chunk_opt_ratio = H5D_XFER_MPIO_CHUNK_OPT_RATIO_DEF;
#endif /* H5_HAVE_PARALLEL */
diff --git a/src/H5Dcompact.c b/src/H5Dcompact.c
index aaa04ea..ae4e473 100644
--- a/src/H5Dcompact.c
+++ b/src/H5Dcompact.c
@@ -155,7 +155,7 @@ done:
* Return: Non-negative on success, negative on failure.
*
* Programmer: Peter Cao
- * December 11, 2005
+ * December 11, 2005
*
*-------------------------------------------------------------------------
*/
@@ -181,10 +181,10 @@ H5D_compact_copy(H5F_t *f_src, H5O_layout_t *layout_src,
HDassert(layout_dst && H5D_COMPACT == layout_dst->type);
/* If there's a source datatype, set up type conversion information */
- if (!dt_src)
+ if (!dt_src)
/* Type conversion not necessary */
HDmemcpy(layout_dst->u.compact.buf, layout_src->u.compact.buf, layout_src->u.compact.size);
- else {
+ else {
H5T_path_t *tpath_src_mem, *tpath_mem_dst; /* Datatype conversion paths */
H5T_t *dt_dst; /* Destination datatype */
H5T_t *dt_mem; /* Memory datatype */
diff --git a/src/H5Dcontig.c b/src/H5Dcontig.c
index 09af474..285f121 100644
--- a/src/H5Dcontig.c
+++ b/src/H5Dcontig.c
@@ -988,7 +988,7 @@ done:
* Return: Non-negative on success, negative on failure.
*
* Programmer: Quincey Koziol
- * Monday, November 21, 2005
+ * Monday, November 21, 2005
*
* Modifier: Peter Cao
* Saturday, January 07, 2006
@@ -996,7 +996,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5D_contig_copy(H5F_t *f_src, H5O_layout_t *layout_src,
+H5D_contig_copy(H5F_t *f_src, H5O_layout_t *layout_src,
H5F_t *f_dst, H5O_layout_t *layout_dst, H5T_t *dt_src, hid_t dxpl_id)
{
haddr_t addr_src; /* File offset in source dataset */
diff --git a/src/H5Dio.c b/src/H5Dio.c
index dec068e..05b428e 100644
--- a/src/H5Dio.c
+++ b/src/H5Dio.c
@@ -1249,7 +1249,7 @@ H5D_contig_write(H5D_io_info_t *io_info, hsize_t nelmts,
if(H5D_contig_collective_io(io_info,file_space,mem_space,buf,TRUE)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "contiguous write failed in collective mode");
}
- else
+ else
#endif
{
if((io_info->ops.write)(io_info,
@@ -1514,7 +1514,7 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
if(H5D_chunk_collective_io(io_info,&fm,buf,FALSE)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunked read failed in collective mode");
}
-
+
else {/* sequential or independent read */
#endif
/* Get first node in chunk skip list */
@@ -1830,7 +1830,7 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
#ifdef H5_HAVE_PARALLEL
/* Check whether the collective mode can be turned off globally*/
-
+
if(io_info->dxpl_cache->xfer_mode == H5FD_MPIO_COLLECTIVE) {
if(H5D_mpio_chunk_adjust_iomode(io_info,&fm))
HGOTO_ERROR(H5E_DATASET,H5E_CANTGET,FAIL,"can't adjust collective I/O")
@@ -1840,7 +1840,7 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunked write failed in collective mode");
}
else {/* sequential or independent write */
-
+
#endif /* H5_HAVE_PARALLEL */
/* Get first node in chunk skip list */
chunk_node=H5SL_first(fm.fsel);
@@ -2188,14 +2188,14 @@ H5D_create_chunk_map(const H5D_t *dataset, const H5T_t *mem_type, const H5S_t *f
/* calculate total chunk in file map*/
fm->select_chunk = NULL;
fm->total_chunks = 1;
- for(u=0; u<fm->f_ndims; u++)
+ for(u=0; u<fm->f_ndims; u++)
fm->total_chunks= fm->total_chunks*fm->chunks[u];
- if(IS_H5FD_MPI(dataset->oloc.file)) {
+ if(IS_H5FD_MPI(dataset->oloc.file)) {
H5_CHECK_OVERFLOW(fm->total_chunks, hsize_t, size_t);
if(NULL == (fm->select_chunk = (hbool_t *) H5MM_calloc((size_t)fm->total_chunks * sizeof(hbool_t))))
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate chunk info")
}
-
+
@@ -2219,9 +2219,9 @@ H5D_create_chunk_map(const H5D_t *dataset, const H5T_t *mem_type, const H5S_t *f
HGOTO_ERROR (H5E_DATASET, H5E_BADSELECT, FAIL, "unable to convert from file to memory data space")
/* If the selection is NONE or POINTS, set the flag to FALSE */
- if(fsel_type == H5S_SEL_POINTS || fsel_type == H5S_SEL_NONE)
+ if(fsel_type == H5S_SEL_POINTS || fsel_type == H5S_SEL_NONE)
sel_hyper_flag = FALSE;
- else
+ else
sel_hyper_flag = TRUE;
/* Check if file selection is a point selection */
if(!sel_hyper_flag) {
@@ -2487,7 +2487,7 @@ H5D_create_chunk_file_map_hyper(fm_map *fm,const H5D_t *dset)
end[u]=(coords[u]+fm->chunk_dim[u])-1;
} /* end for */
-
+
/* Calculate the index of this chunk */
if(H5V_chunk_index(fm->f_ndims,coords,fm->layout->u.chunk.dim,fm->down_chunks,&chunk_index)<0)
HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
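The two computations in this hunk, the total chunk count and the per-chunk index, amount to a product over dimensions and a row-major linearization of the chunk coordinates. The self-contained sketch below illustrates the idea with coordinates already expressed in units of chunks; it is not the H5V_chunk_index implementation, which also converts byte offsets using the chunk dimensions.

    #include <stdio.h>

    /* Row-major linear index of chunk coordinates 'coords' in a grid with
     * 'nchunks[d]' chunks along each of 'ndims' dimensions. */
    static unsigned long chunk_index(int ndims, const unsigned long coords[],
                                     const unsigned long nchunks[])
    {
        unsigned long idx = 0;
        int d;

        for(d = 0; d < ndims; d++)
            idx = idx * nchunks[d] + coords[d];
        return idx;
    }

    int main(void)
    {
        unsigned long nchunks[2] = { 4, 3 };   /* 4 x 3 = 12 chunks in total      */
        unsigned long coords[2]  = { 2, 1 };   /* third row, second column        */
        unsigned long total = 1;
        int d;

        for(d = 0; d < 2; d++)
            total *= nchunks[d];               /* same product as total_chunks    */
        printf("total=%lu index=%lu\n", total, chunk_index(2, coords, nchunks));
        return 0;
    }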
diff --git a/src/H5Distore.c b/src/H5Distore.c
index 2978f72..b479aa0 100644
--- a/src/H5Distore.c
+++ b/src/H5Distore.c
@@ -196,7 +196,7 @@ typedef struct H5D_istore_it_ud4_t {
haddr_t addr_dst; /* Address of dest. B-tree */
void *buf; /* Buffer to hold chunk data for read/write */
size_t buf_size; /* Buffer size */
-
+
/* needed for converting variable-length data */
hid_t tid_src; /* Datatype ID for source datatype */
hid_t tid_dst; /* Datatype ID for destination datatype */
@@ -911,12 +911,12 @@ H5D_istore_iter_chunkmap (H5F_t UNUSED *f, hid_t UNUSED dxpl_id, const void *_lt
int ret_value = H5B_ITER_CONT; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_istore_iter_chunkmap);
-
+
rank = udata->common.mesg->u.chunk.ndims - 1;
-
+
if(H5V_chunk_index(rank,lt_key->offset,udata->common.mesg->u.chunk.dim,udata->down_chunks,&chunk_index)<0)
HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
-
+
udata->chunk_addr[chunk_index] = addr;
done:
@@ -969,15 +969,15 @@ H5D_istore_iter_dump (H5F_t UNUSED *f, hid_t UNUSED dxpl_id, const void *_lt_key
/*-------------------------------------------------------------------------
- * Function: H5D_istore_iter_copy
+ * Function: H5D_istore_iter_copy
*
- * Purpose: copy chunked raw data from source file and insert to the
+ * Purpose: copy chunked raw data from source file and insert to the
* B-tree node in the destination file
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Peter Cao
- * August 20, 2005
+ * Programmer: Peter Cao
+ * August 20, 2005
*
*-------------------------------------------------------------------------
*/
@@ -2446,8 +2446,8 @@ done:
/*-------------------------------------------------------------------------
* Function: H5D_istore_chunkmap
*
- * Purpose: obtain the chunk address and corresponding chunk index
- *
+ * Purpose: obtain the chunk address and corresponding chunk index
+ *
* Return: Success: Non-negative on succeed.
*
* Failure: negative value
@@ -3510,13 +3510,13 @@ done:
* Return: Non-negative on success (with the ISTORE argument initialized
* and ready to write to an object header). Negative on failure.
*
- * Programmer: Peter Cao
- * August 20, 2005
+ * Programmer: Peter Cao
+ * August 20, 2005
*
*-------------------------------------------------------------------------
*/
herr_t
-H5D_istore_copy(H5F_t *f_src, H5O_layout_t *layout_src, H5F_t *f_dst,
+H5D_istore_copy(H5F_t *f_src, H5O_layout_t *layout_src, H5F_t *f_dst,
H5O_layout_t *layout_dst, H5T_t *dt_src, H5O_pline_t *pline, hid_t dxpl_id)
{
H5D_istore_it_ud4_t udata;
diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index 7ad8e50..ead1777 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -104,29 +104,29 @@ typedef struct H5D_common_coll_info_t {
size_t mpi_buf_count;
haddr_t chunk_addr;
} H5D_common_coll_info_t;
-
+
/********************/
/* Local Prototypes */
/********************/
-static herr_t
-H5D_multi_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf,
+static herr_t
+H5D_multi_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf,
hbool_t do_write);
static herr_t
-H5D_link_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf,
+H5D_link_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf,
hbool_t do_write,int sum_chunk);
-static herr_t
+static herr_t
H5D_inter_collective_io(H5D_io_info_t *io_info,const H5S_t *file_space,
- const H5S_t *mem_space,haddr_t addr,
+ const H5S_t *mem_space,haddr_t addr,
const void *buf, hbool_t do_write );
-static herr_t
+static herr_t
H5D_final_collective_io(H5D_io_info_t *io_info,MPI_Datatype*mpi_file_type,
MPI_Datatype *mpi_buf_type,
- H5D_common_coll_info_t* coll_info,
+ H5D_common_coll_info_t* coll_info,
const void *buf, hbool_t do_write);
#ifdef OLD_WAY
static herr_t
@@ -134,14 +134,14 @@ H5D_pre_sort_chunk(H5D_io_info_t *io_info,int total_chunks,
haddr_t total_chunk_addr_array[]);
#endif
-static herr_t
+static herr_t
H5D_sort_chunk(H5D_io_info_t * io_info,
fm_map *fm,
H5D_chunk_addr_info_t chunk_addr_info_array[],
int many_chunk_opt);
-static herr_t
-H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
+static herr_t
+H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
fm_map *fm,
uint8_t assign_io_mode[],
haddr_t chunk_addr[]);
@@ -151,7 +151,7 @@ static herr_t H5D_ioinfo_make_coll(H5D_io_info_t *io_info);
static herr_t H5D_mpio_get_min_chunk(const H5D_io_info_t *io_info,
const fm_map *fm, int *min_chunkf);
static int H5D_cmp_chunk_addr(const void *addr1, const void *addr2);
-static herr_t
+static herr_t
H5D_mpio_get_sum_chunk(const H5D_io_info_t *io_info,
const fm_map *fm, int *sum_chunkf);
@@ -284,10 +284,10 @@ done:
* Description: If H5_MPI_SPECIAL_COLLECTIVE_IO_WORKS is not defined,
collective IO with no contribution from one or more
processes is not assured. We will check the minimum
- number of chunks the process is used. If the number is
+ number of chunks the process is used. If the number is
zero, we will use independent IO mode instead.
This is necessary with Linked chunk IO.
- * Purpose: Checks if it is possible to do collective IO
+ * Purpose: Checks if it is possible to do collective IO
*
* Return: Success: Non-negative: TRUE or FALSE
* Failure: Negative
@@ -307,8 +307,8 @@ H5D_mpio_chunk_adjust_iomode(H5D_io_info_t *io_info, const fm_map *fm) {
#ifndef H5_MPI_SPECIAL_COLLECTIVE_IO_WORKS
if(H5D_mpio_get_min_chunk(io_info,fm,&min_chunk)<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSWAP, FAIL, "unable to obtain the min chunk number of all processes");
- if(min_chunk == 0) {
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSWAP, FAIL, "unable to obtain the min chunk number of all processes");
+ if(min_chunk == 0) {
H5P_genplist_t *dx_plist; /* Data transfer property list */
/* Get the dataset transfer property list */
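The "minimum number of chunks over all processes" test described above is a single collective reduction. A hedged sketch of that step follows; it is illustrative only, since the real code obtains the per-process count from the chunk map and then adjusts the transfer property list rather than returning a value.

    #include <mpi.h>

    /* Return the smallest per-process chunk count across the communicator.
     * If it is zero, some process contributes nothing, and the caller may
     * choose to fall back to independent I/O. */
    static int min_chunk_count(MPI_Comm comm, int my_chunk_count)
    {
        int min_count = 0;

        MPI_Allreduce(&my_chunk_count, &min_count, 1, MPI_INT, MPI_MIN, comm);
        return min_count;
    }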
@@ -347,11 +347,11 @@ done:
*/
herr_t
H5D_mpio_select_read(H5D_io_info_t *io_info,
- size_t mpi_buf_count,
+ size_t mpi_buf_count,
const size_t UNUSED elmt_size,
- const H5S_t UNUSED *file_space,
+ const H5S_t UNUSED *file_space,
const H5S_t UNUSED *mem_space,
- haddr_t addr,
+ haddr_t addr,
void *buf/*out*/)
{
herr_t ret_value = SUCCEED;
@@ -378,9 +378,9 @@ done:
*/
herr_t
H5D_mpio_select_write(H5D_io_info_t *io_info,
- size_t mpi_buf_count,
+ size_t mpi_buf_count,
const size_t UNUSED elmt_size,
- const H5S_t UNUSED *file_space,
+ const H5S_t UNUSED *file_space,
const H5S_t UNUSED *mem_space,
haddr_t addr,
const void *buf)
@@ -569,8 +569,8 @@ done:
* Function: H5D_contig_collective_io
*
* Purpose: Wrapper Routine for H5D_inter_collective_io
- The starting address of contiguous storage is passed
- *
+ The starting address of contiguous storage is passed
+ *
*
* Return: Non-negative on success/Negative on failure
*
@@ -581,11 +581,11 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5D_contig_collective_io(H5D_io_info_t *io_info,
+H5D_contig_collective_io(H5D_io_info_t *io_info,
const H5S_t *file_space,
const H5S_t *mem_space,
const void *buf,
- hbool_t do_write)
+ hbool_t do_write)
{
@@ -607,8 +607,8 @@ H5D_contig_collective_io(H5D_io_info_t *io_info,
#endif
if(H5D_inter_collective_io(io_info,file_space,mem_space,addr,buf,do_write)<0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL,"couldn't finish shared collective MPI-IO");
-
- done:
+
+ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_contig_collective_io */
@@ -616,10 +616,10 @@ H5D_contig_collective_io(H5D_io_info_t *io_info,
/*-------------------------------------------------------------------------
* Function: H5D_chunk_collective_io
*
- * Purpose: Routine for
- 1) choose an IO option:
+ * Purpose: Routine for
+ 1) choose an IO option:
a) One collective IO defined by one MPI derived datatype to link through all chunks
- or b) multiple chunk IOs,to do MPI-IO for each chunk, the IO mode may be adjusted
+ or b) multiple chunk IOs,to do MPI-IO for each chunk, the IO mode may be adjusted
due to the selection pattern for each chunk.
* For option a)
1. Sort the chunk address, obtain chunk info according to the sorted chunk address
@@ -633,7 +633,7 @@ H5D_contig_collective_io(H5D_io_info_t *io_info,
2. Depending on whether the IO mode is collective or independent or none,
Create either MPI derived datatype for each chunk to do collective IO or just do independent IO
3. Set up collective IO property list for collective mode
- 4. DO IO
+ 4. DO IO
*
* Return: Non-negative on success/Negative on failure
*
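The choice the Purpose text describes, one linked-chunk collective transfer versus per-chunk I/O, comes down to comparing the average number of selected chunks per process against a threshold taken from the transfer property list. The sketch below is a simplification with invented names, not the exact HDF5 logic, which also honors the explicit H5FD_MPIO_CHUNK_ONE_IO / H5FD_MPIO_CHUNK_MULTI_IO overrides shown in the code that follows.

    enum io_option { ONE_LINK_CHUNK_IO, MULTI_CHUNK_IO };

    /* Simplified selection: if every process touches enough chunks on average,
     * link them all into one collective transfer; otherwise go chunk by chunk. */
    static enum io_option choose_io_option(int sum_chunk, int mpi_size, int threshold)
    {
        if(mpi_size > 0 && (sum_chunk / mpi_size) >= threshold)
            return ONE_LINK_CHUNK_IO;
        return MULTI_CHUNK_IO;
    }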
@@ -643,44 +643,44 @@ H5D_contig_collective_io(H5D_io_info_t *io_info,
*
*-------------------------------------------------------------------------
*/
-herr_t
-H5D_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf, hbool_t do_write)
+herr_t
+H5D_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf, hbool_t do_write)
{
int io_option = H5D_MULTI_CHUNK_IO_MORE_OPT;
int sum_chunk = 0,mpi_size;
unsigned one_link_chunk_io_threshold;
- H5P_genplist_t *plist;
+ H5P_genplist_t *plist;
H5FD_mpio_chunk_opt_t chunk_opt_mode;
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
htri_t check_prop,temp_not_link_io = FALSE;
int prop_value,new_value;
#endif
- herr_t ret_value = SUCCEED;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_collective_io)
assert (IS_H5FD_MPIO(io_info->dset->oloc.file));
-
+
/* Obtain the data transfer properties */
if(NULL == (plist = H5I_object(io_info->dxpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file access property list")
-
+
/* Check the optional property list on what to do with collective chunk IO. */
chunk_opt_mode=(H5FD_mpio_chunk_opt_t)H5P_peek_unsigned(plist,H5D_XFER_MPIO_CHUNK_OPT_HARD_NAME);
#ifdef KENT
printf("chunk_opt_mode = %d\n",chunk_opt_mode);
#endif
-
+
if(chunk_opt_mode == H5FD_MPIO_CHUNK_ONE_IO) io_option = H5D_ONE_LINK_CHUNK_IO;/*no opt*/
else if(chunk_opt_mode == H5FD_MPIO_CHUNK_MULTI_IO) io_option = H5D_MULTI_CHUNK_IO;/*no opt */
else {
- if(H5D_mpio_get_sum_chunk(io_info,fm,&sum_chunk)<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSWAP, FAIL, "unable to obtain the total chunk number of all processes");
+ if(H5D_mpio_get_sum_chunk(io_info,fm,&sum_chunk)<0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSWAP, FAIL, "unable to obtain the total chunk number of all processes");
if((mpi_size = H5F_mpi_get_size(io_info->dset->oloc.file))<0)
HGOTO_ERROR (H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi size");
-
+
if(NULL == (plist = H5I_object(io_info->dxpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file access property list")
@@ -732,8 +732,8 @@ H5D_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf, hbool
HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to get property value");
}
}
-
-
+
+
#endif
#ifndef H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS
if(io_option == H5D_ONE_LINK_CHUNK_IO ) io_option = H5D_MULTI_CHUNK_IO ;/* We can not do this with one chunk IO. */
@@ -745,9 +745,9 @@ H5D_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf, hbool
if(H5D_link_chunk_collective_io(io_info,fm,buf,do_write,sum_chunk)<0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL,"couldn't finish linked chunk MPI-IO");
}
-
+
else { /*multiple chunk IOs without opt */
-
+
if(H5D_multi_chunk_collective_io(io_info,fm,buf,do_write)<0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL,"couldn't finish multiple chunk MPI-IO");
@@ -764,7 +764,7 @@ done:
1. Sort the chunk address and chunk info
2. Build up MPI derived datatype for each chunk
3. Build up the final MPI derived datatype
- 4. Use common collective IO routine to do MPI-IO
+ 4. Use common collective IO routine to do MPI-IO
*
* Return: Non-negative on success/Negative on failure
@@ -788,9 +788,9 @@ H5D_link_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf,
size_t mpi_buf_count;
size_t mpi_file_count;
hbool_t mbt_is_derived=0, /* Whether the buffer (memory) type is derived and needs to be free'd */
- mft_is_derived=0; /* Whether the file type is derived and needs to be free'd */
-
- int mpi_size,mpi_code; /* MPI return code */
+ mft_is_derived=0; /* Whether the file type is derived and needs to be free'd */
+
+ int mpi_size,mpi_code; /* MPI return code */
int i,num_chunk=0,total_chunks;
size_t ori_num_chunk;
@@ -818,8 +818,8 @@ H5D_link_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf,
char *bc_percent = NULL;
char *bcc_percent = NULL;
#endif
- herr_t ret_value = SUCCEED;
-
+ herr_t ret_value = SUCCEED;
+
FUNC_ENTER_NOAPI_NOINIT(H5D_link_chunk_collective_io)
ori_total_chunks = fm->total_chunks;
H5_ASSIGN_OVERFLOW(total_chunks,ori_total_chunks,hsize_t,int);
@@ -829,7 +829,7 @@ H5D_link_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf,
H5SL_node_t *chunk_node;
H5D_chunk_info_t *chunk_info;
H5D_storage_t store;
-
+
chunk_node = H5SL_first(fm->fsel);
if(chunk_node == NULL) {
if(H5D_istore_chunkmap(io_info,total_chunks,&chunk_base_addr,fm->down_chunks)<0)
@@ -846,7 +846,7 @@ H5D_link_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf,
if(HADDR_UNDEF==(chunk_base_addr = H5D_istore_get_addr(io_info,NULL)))
HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL,"couldn't get chunk info from skipped list");
-
+
#ifdef KENT
printf("before inter_collective_io for total chunk = 1 \n");
#endif
@@ -864,7 +864,7 @@ printf("before inter_collective_io for total chunk = 1 \n");
printf("total_chunks = %d\n",(int)total_chunks);
#endif
-
+
if(num_chunk == 0) total_chunk_addr_array = H5MM_malloc(sizeof(haddr_t)*total_chunks);
else
{
@@ -888,12 +888,12 @@ printf("total_chunks = %d\n",(int)total_chunks);
"bc" means 'b-tree iterately obtain all chunk addresses individually',
the default one means 'obtaining the chunk address individually',
*/
-
+
if(bcc_percent=getenv("BCC_PERCENT")){
bsearch_coll_chunk_threshold = atoi(bcc_percent);
assert((bsearch_coll_chunk_threshold >=0) &&(bsearch_coll_chunk_threshold <=100));
}
- else
+ else
bsearch_coll_chunk_threshold = H5D_ALL_CHUNK_ADDR_THRES_COL;
#else
bsearch_coll_chunk_threshold = H5D_ALL_CHUNK_ADDR_THRES_COL; /*This number may be changed according to the performance study */
@@ -902,9 +902,9 @@ printf("total_chunks = %d\n",(int)total_chunks);
if((mpi_size = H5F_mpi_get_size(io_info->dset->oloc.file))<0)
HGOTO_ERROR (H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi size");
- /* Calculate the actual threshold to obtain all chunk addresses collectively
+ /* Calculate the actual threshold to obtain all chunk addresses collectively
The bigger this number is, the more likely it is that chunk addresses will be obtained collectively. */
- /* For non-optimization one-link IO,
+ /* For non-optimization one-link IO,
actual bsearch threshold is always 0,
we would always want to obtain the chunk addresses individually
for each process. */
@@ -921,7 +921,7 @@ printf("total_chunks = %d\n",(int)total_chunks);
bsearch_chunk_ratio = atoi(bc_percent);
assert((bsearch_chunk_ratio<=100)&&(bsearch_chunk_ratio>=0));
}
- else
+ else
bsearch_chunk_ratio = H5D_ALL_CHUNK_ADDR_THRES_IND;
#else
bsearch_chunk_ratio = H5D_ALL_CHUNK_ADDR_THRES_IND; /*This number may be changed according to the performance study */
@@ -931,20 +931,20 @@ printf("total_chunks = %d\n",(int)total_chunks);
The unit of the threshold is the number of chunks. The value should be at least 1.
It can be calculated as follows:
- if(total_chunks*bsearch_chunk_ratio/100 <=1)
+ if(total_chunks*bsearch_chunk_ratio/100 <=1)
bsearch_chunk_threahold = 1;
- else
+ else
bsearch_chunk_threshold = total_chunks*bsearch_chunk_ratio/100;
- In order to make the calculation more efficient,
+ In order to make the calculation more efficient,
we use the following approximate formula to calculate the threshold.
bsearch_chunk_threshold = 1+ (total_chunks*bsearch_chunk_ratio-99)/100;
The only difference is when total_chunks*bsearch_chunk_ratio == 100n+99;
- the approximate formula will give value (n+1) instead of n for threshold.
+ the approximate formula will give value (n+1) instead of n for threshold.
That shouldn't matter much from our perspective.
- */
-
+ */
+
bsearch_chunk_threshold = 1 +(total_chunks*bsearch_chunk_ratio-99)/100;
if(num_chunk > bsearch_chunk_threshold) many_chunk_opt = H5D_OBTAIN_ALL_CHUNK_ADDR_IND;
if((sum_chunk == 0) && (total_chunks >= H5D_ALL_CHUNK_ADDR_THRES_IND_NUM))
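To make the approximation in the comment above concrete: with total_chunks = 500 and bsearch_chunk_ratio = 30 the exact integer threshold is 500*30/100 = 150, and the approximation gives 1 + (15000 - 99)/100 = 150 as well. The two only disagree when the product ends in 99; for example 13*23 = 299 gives 2 exactly but 1 + 200/100 = 3 approximately, which is the (n+1) versus n case the comment mentions. A two-line check:

    #include <stdio.h>

    int main(void)
    {
        long total_chunks = 13, ratio = 23;                    /* product 299 = 100*2 + 99 */
        long exact  = (total_chunks * ratio) / 100;            /* 2                        */
        long approx = 1 + (total_chunks * ratio - 99) / 100;   /* 3                        */

        printf("exact=%ld approx=%ld\n", exact, approx);
        return 0;
    }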
@@ -953,14 +953,14 @@ printf("total_chunks = %d\n",(int)total_chunks);
#ifdef KENT
printf("before sorting the chunk address \n");
#endif
- /* Sort the chunk address
+ /* Sort the chunk address
when chunk optimization selection is either H5D_OBTAIN_*/
if(num_chunk == 0){ /* special case: this process doesn't select anything */
if(H5D_istore_chunkmap(io_info,total_chunks,total_chunk_addr_array,fm->down_chunks)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address");
chunk_base_addr = total_chunk_addr_array[0];
}
-
+
else {
if(H5D_sort_chunk(io_info,fm,chunk_addr_info_array,many_chunk_opt)<0)
HGOTO_ERROR (H5E_DATASPACE, H5E_CANTSWAP, FAIL, "unable to sort chunk address");
@@ -969,8 +969,8 @@ printf("before sorting the chunk address \n");
#ifdef KENT
printf("after sorting the chunk address \n");
#endif
-
- /* Obtain MPI derived datatype from all individual chunks */
+
+ /* Obtain MPI derived datatype from all individual chunks */
for ( i = 0; i < num_chunk; i++) {
/* Disk MPI derived datatype */
if(H5S_mpio_space_type(chunk_addr_info_array[i].chunk_info.fspace,src_type_size,&chunk_ftype[i],
@@ -981,7 +981,7 @@ printf("after sorting the chunk address \n");
if(H5S_mpio_space_type(chunk_addr_info_array[i].chunk_info.mspace,dst_type_size,&chunk_mtype[i],
&mpi_buf_count,&mpi_buf_extra_offset,&mbt_is_derived)<0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't create MPI buf type");
-
+
/* Chunk address relative to the first chunk */
chunk_addr_info_array[i].chunk_addr -= chunk_base_addr;
H5_ASSIGN_OVERFLOW(chunk_disp_array[i],chunk_addr_info_array[i].chunk_addr,haddr_t,MPI_Aint);
@@ -989,7 +989,7 @@ printf("after sorting the chunk address \n");
blocklen_value = 1;
if(num_chunk){
-
+
/* initialize the buffer with the constant value 1 */
H5V_array_fill(blocklen,&blocklen_value,sizeof(int),(size_t)num_chunk);
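The blocklen array filled with 1s here and the per-chunk byte displacements feed the construction of a single MPI derived datatype that spans every selected chunk. The visible code builds full per-chunk file and buffer types (chunk_ftype/chunk_mtype) which are then combined into one type; the sketch below shows only the simpler mechanism of plain byte blocks at given displacements, using an MPI-2 call, and is an illustration rather than the HDF5 construction.

    #include <mpi.h>

    /* Build and commit one derived type covering 'n' blocks of 'blocklen[i]'
     * bytes at byte displacements 'disp[i]' relative to a base address. */
    static int make_linked_type(int n, int blocklen[], MPI_Aint disp[],
                                MPI_Datatype *newtype)
    {
        if(MPI_Type_create_hindexed(n, blocklen, disp, MPI_BYTE, newtype) != MPI_SUCCESS)
            return -1;
        if(MPI_Type_commit(newtype) != MPI_SUCCESS)
            return -1;
        return 0;
    }

Once the transfer completes, a committed derived type of this kind must be freed with MPI_Type_free, which is what the mbt_is_derived/mft_is_derived flags later in this file guard: predefined types such as MPI_BYTE are never freed.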
@@ -1022,7 +1022,7 @@ printf("after sorting the chunk address \n");
else {/* no selection at all for this process */
chunk_final_ftype = MPI_BYTE;
chunk_final_mtype = MPI_BYTE;
-
+
/* buffer, file derived datatypes should be true */
coll_info.mbt_is_derived = 0;
coll_info.mft_is_derived = 0;
@@ -1032,7 +1032,7 @@ printf("after sorting the chunk address \n");
#ifdef KENT
printf("before coming to final collective IO\n");
#endif
-
+
if(H5D_final_collective_io(io_info,&chunk_final_ftype,&chunk_final_mtype,&coll_info,buf,do_write)<0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL,"couldn't finish MPI-IO");
@@ -1042,7 +1042,7 @@ printf("before freeing memory inside H5D_link_collective_io ret_value = %d\n",r
#endif
-
+
if (fm->total_chunks != 1) {
if(num_chunk == 0) HDfree(total_chunk_addr_array);
else {
@@ -1072,7 +1072,7 @@ printf("before leaving H5D_link_collective_io ret_value = %d\n",ret_value);
1. Use MPI_gather and MPI_Bcast to obtain IO mode in each chunk(collective/independent/none)
2. Depending on whether the IO mode is collective or independent or none,
Create either MPI derived datatype for each chunk or just do independent IO
- 3. Use common collective IO routine to do MPI-IO
+ 3. Use common collective IO routine to do MPI-IO
*
* Return: Non-negative on success/Negative on failure
*
@@ -1082,8 +1082,8 @@ printf("before leaving H5D_link_collective_io ret_value = %d\n",ret_value);
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5D_multi_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf, hbool_t do_write)
+static herr_t
+H5D_multi_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf, hbool_t do_write)
{
int i,total_chunk;
@@ -1096,7 +1096,7 @@ H5D_multi_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf,
H5D_storage_t store; /* union of EFL and chunk pointer in file space */
hbool_t select_chunk;
hbool_t last_io_mode_coll = TRUE;
- herr_t ret_value = SUCCEED;
+ herr_t ret_value = SUCCEED;
#ifdef KENT
int mpi_rank;
#endif
@@ -1117,7 +1117,7 @@ H5D_multi_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf,
#endif
/* obtain IO option for each chunk */
- if(H5D_obtain_mpio_mode(io_info,fm,chunk_io_option,chunk_addr)<0)
+ if(H5D_obtain_mpio_mode(io_info,fm,chunk_io_option,chunk_addr)<0)
HGOTO_ERROR (H5E_DATASET, H5E_CANTRECV, FAIL, "unable to obtain MPIO mode");
for( i = 0; i<total_chunk;i++){
@@ -1140,7 +1140,7 @@ printf("mpi_rank = %d, chunk index = %d\n",mpi_rank,i);
if(NULL ==(chunk_node = H5SL_first(fm->fsel)))
HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL,"couldn't get chunk node from skipped list");
#else
-
+
if(NULL ==(chunk_node = H5SL_first(fm->fsel)))
HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL,"couldn't get chunk node from skipped list");
while(chunk_node){
@@ -1161,30 +1161,30 @@ printf("mpi_rank = %d, chunk index = %d\n",mpi_rank,i);
#endif
}
- if(chunk_io_option[i] == 1){ /*collective IO for this chunk,
+ if(chunk_io_option[i] == 1){ /*collective IO for this chunk,
note: even if there is no selection for this process,
the process still needs to contribute MPI NONE TYPE.*/
#ifdef KENT
printf("inside collective chunk IO mpi_rank = %d, chunk index = %d\n",mpi_rank,i);
#endif
-
+
if(!last_io_mode_coll)
/* Switch back to collective I/O */
if(H5D_ioinfo_make_coll(io_info) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't switch to collective I/O")
-
+
if(select_chunk){
if(H5D_inter_collective_io(io_info,chunk_info->fspace,chunk_info->mspace,
chunk_addr[i],buf,do_write )<0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL,"couldn't finish shared collective MPI-IO");
-
+
}
else{
if(H5D_inter_collective_io(io_info,NULL,NULL,
chunk_addr[i],buf,do_write )<0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL,"couldn't finish shared collective MPI-IO");
-
- }
+
+ }
last_io_mode_coll = TRUE;
}
@@ -1192,7 +1192,7 @@ printf("inside collective chunk IO mpi_rank = %d, chunk index = %d\n",mpi_rank,i
#ifdef KENT
printf("inside independent IO mpi_rank = %d, chunk index = %d\n",mpi_rank,i);
#endif
-
+
HDassert(chunk_io_option[i] == 0);
if(!select_chunk) continue; /* this process has nothing to do with this chunk, continue! */
if(last_io_mode_coll)
@@ -1206,16 +1206,16 @@ printf("inside independent IO mpi_rank = %d, chunk index = %d\n",mpi_rank,i);
chunk_info->fspace,chunk_info->mspace,0,
buf);
/* Check return value of the write */
- if (ret_value<0)
+ if (ret_value<0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "optimized write failed")
}
else {
ret_value = (io_info->ops.read)(io_info,
chunk_info->chunk_points,H5T_get_size(io_info->dset->shared->type),
chunk_info->fspace,chunk_info->mspace,0,
- buf);
+ buf);
/* Check return value from optimized write */
- if (ret_value<0)
+ if (ret_value<0)
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed")
}
@@ -1241,7 +1241,7 @@ printf("inside independent IO mpi_rank = %d, chunk index = %d\n",mpi_rank,i);
*
* Purpose: Routine for the shared part of collective IO between multiple chunk
collective IO and contiguous collective IO
-
+
*
* Return: Non-negative on success/Negative on failure
*
@@ -1251,16 +1251,16 @@ printf("inside independent IO mpi_rank = %d, chunk index = %d\n",mpi_rank,i);
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5D_inter_collective_io(H5D_io_info_t *io_info,const H5S_t *file_space,const H5S_t *mem_space,
- haddr_t addr, const void *buf, hbool_t do_write )
+ haddr_t addr, const void *buf, hbool_t do_write )
{
size_t mpi_buf_count, mpi_file_count; /* Number of "objects" to transfer */
MPI_Datatype mpi_file_type,mpi_buf_type;
hsize_t mpi_buf_offset, mpi_file_offset; /* Offset within dataset where selection (ie. MPI type) begins */
hbool_t mbt_is_derived=0, /* Whether the buffer (memory) type is derived and needs to be free'd */
- mft_is_derived=0; /* Whether the file type is derived and needs to be free'd */
+ mft_is_derived=0; /* Whether the file type is derived and needs to be free'd */
H5D_common_coll_info_t coll_info;
herr_t ret_value = SUCCEED; /* return value */
@@ -1274,11 +1274,11 @@ H5D_inter_collective_io(H5D_io_info_t *io_info,const H5S_t *file_space,const H5S
if(H5S_mpio_space_type(mem_space,H5T_get_size(io_info->dset->shared->type),
&mpi_buf_type,&mpi_buf_count,&mpi_buf_offset,&mbt_is_derived)<0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't create MPI buffer type");
-
+
}
else {
/* For non-selection, participate with a none MPI derived datatype, the count is 0. */
- mpi_buf_type = MPI_BYTE;
+ mpi_buf_type = MPI_BYTE;
mpi_file_type = MPI_BYTE;
mpi_file_count = 0;
mpi_buf_count = 0;
@@ -1305,7 +1305,7 @@ printf("before leaving inter_collective_io ret_value = %d\n",ret_value);
* Function: H5D_final_collective_io
*
* Purpose: Routine for the common part of collective IO with different storages.
-
+
*
* Return: Non-negative on success/Negative on failure
*
@@ -1315,13 +1315,13 @@ printf("before leaving inter_collective_io ret_value = %d\n",ret_value);
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5D_final_collective_io(H5D_io_info_t *io_info,MPI_Datatype*mpi_file_type,MPI_Datatype *mpi_buf_type,
- H5D_common_coll_info_t* coll_info, const void *buf, hbool_t do_write)
+ H5D_common_coll_info_t* coll_info, const void *buf, hbool_t do_write)
{
- int mpi_code; /* MPI return code */
+ int mpi_code; /* MPI return code */
hbool_t plist_is_setup=0; /* Whether the dxpl has been customized */
herr_t ret_value = SUCCEED;
@@ -1338,7 +1338,7 @@ H5D_final_collective_io(H5D_io_info_t *io_info,MPI_Datatype*mpi_file_type,MPI_Da
plist_is_setup=1;
#ifdef KENT
HDfprintf(stdout,"chunk addr %Hu\n",coll_info->chunk_addr);
- printf("mpi_buf_count %d\n",coll_info->mpi_buf_count);
+ printf("mpi_buf_count %d\n",coll_info->mpi_buf_count);
#endif
if(do_write) {
ret_value = (io_info->ops.write)(io_info,
@@ -1348,7 +1348,7 @@ H5D_final_collective_io(H5D_io_info_t *io_info,MPI_Datatype*mpi_file_type,MPI_Da
#ifdef KENT
printf("ret_value after final collective IO= %d\n",ret_value);
#endif
- if (ret_value<0)
+ if (ret_value<0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "optimized write failed")
}
else {
@@ -1356,11 +1356,11 @@ H5D_final_collective_io(H5D_io_info_t *io_info,MPI_Datatype*mpi_file_type,MPI_Da
coll_info->mpi_buf_count,0,NULL,NULL,coll_info->chunk_addr,
buf);
/* Check return value from optimized write */
- if (ret_value<0)
+ if (ret_value<0)
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed")
}
done:
-
+
/* Reset the dxpl settings */
if(plist_is_setup) {
if(H5FD_mpi_teardown_collective(io_info->dxpl_id)<0)
@@ -1371,7 +1371,7 @@ H5D_final_collective_io(H5D_io_info_t *io_info,MPI_Datatype*mpi_file_type,MPI_Da
if (coll_info->mbt_is_derived) {
if (MPI_SUCCESS != (mpi_code= MPI_Type_free( mpi_buf_type )))
HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code);
-
+
}
if (coll_info->mft_is_derived) {
if (MPI_SUCCESS != (mpi_code= MPI_Type_free( mpi_file_type )))
@@ -1392,7 +1392,7 @@ H5D_final_collective_io(H5D_io_info_t *io_info,MPI_Datatype*mpi_file_type,MPI_Da
Description:
root will collect all chunk addresses and broadcast them to the other processes.
-
+
Parameters:
Input: H5D_io_info_t* io_info,
@@ -1430,7 +1430,7 @@ H5D_pre_sort_chunk(H5D_io_info_t *io_info,int total_chunks,haddr_t total_chunk_a
if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&chunk_addrtype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code);
-
+
mpi_type_cleanup = 1;
if(mpi_rank == root) {
@@ -1441,7 +1441,7 @@ H5D_pre_sort_chunk(H5D_io_info_t *io_info,int total_chunks,haddr_t total_chunk_a
/* Broadcasting the MPI_IO option info. and chunk address info. */
if(MPI_SUCCESS !=(mpi_code = MPI_Bcast(total_chunk_addr_array,1,chunk_addrtype,root,comm)))
HMPI_GOTO_ERROR(FAIL, "MPI_BCast failed", mpi_code);
-
+
done:
if(mpi_type_cleanup){
@@ -1462,12 +1462,12 @@ done:
For most cases, the chunk address has already been sorted in increasing order.
The special sorting flag is used to optimize this common case.
quick sort is used for necessary sorting.
-
+
Parameters:
Input: H5D_io_info_t* io_info,
fm_map *fm(global chunk map struct)
- Input/Output: H5D_chunk_addr_info_t chunk_addr_info_array[] : array to store chunk address and information
- many_chunk_opt : flag to optimize the way to obtain chunk addresses
+ Input/Output: H5D_chunk_addr_info_t chunk_addr_info_array[] : array to store chunk address and information
+ many_chunk_opt : flag to optimize the way to obtain chunk addresses
for many chunks
*
* Return: Non-negative on success/Negative on failure
@@ -1479,7 +1479,7 @@ done:
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5D_sort_chunk(H5D_io_info_t * io_info,
fm_map *fm,
H5D_chunk_addr_info_t chunk_addr_info_array[],
@@ -1500,7 +1500,7 @@ H5D_sort_chunk(H5D_io_info_t * io_info,
H5D_storage_t store; /*union of EFL and chunk pointer in file space */
hbool_t do_sort = FALSE;
herr_t ret_value = SUCCEED; /*return value */
-
+
FUNC_ENTER_NOAPI_NOINIT(H5D_sort_chunk)
num_chunks = H5SL_count(fm->fsel);
@@ -1563,7 +1563,7 @@ printf("Coming inside H5D_OBTAIN_ALL_CHUNK_ADDR_COL\n");
if(HADDR_UNDEF==(chunk_addr = H5D_istore_get_addr(io_info,NULL)))
HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL,"couldn't get chunk info from skipped list");
}
- else
+ else
chunk_addr = total_chunk_addr_array[chunk_info->index];
chunk_addr_info_array[i].chunk_addr = chunk_addr;
chunk_addr_info_array[i].chunk_info = *chunk_info;
@@ -1574,12 +1574,12 @@ printf("Coming inside H5D_OBTAIN_ALL_CHUNK_ADDR_COL\n");
chunk_info = H5SL_item(chunk_node);
store.chunk.offset = chunk_info->coords;
store.chunk.index = chunk_info->index;
-
+
if(many_chunk_opt == H5D_OBTAIN_ONE_CHUNK_ADDR_IND){
if(HADDR_UNDEF==(chunk_addr = H5D_istore_get_addr(io_info,NULL)))
HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL,"couldn't get chunk info from skipped list");
}
- else
+ else
chunk_addr = total_chunk_addr_array[chunk_info->index];
if(chunk_addr < chunk_addr_info_array[i].chunk_addr) do_sort = TRUE;
@@ -1605,7 +1605,7 @@ done:
}
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_sort_chunk() */
-
+
/*-------------------------------------------------------------------------
* Function: H5D_obtain_mpio_mode
@@ -1616,11 +1616,11 @@ done:
Description:
1) Each process provides two pieces of information for all chunks with selections
- a) chunk index
+ a) chunk index
b) whether this chunk is regular (for the case where MPI derived datatypes do not work)
2) Gather all the information to the root process
-
+
3) Root process will do the following:
a) Obtain chunk address for all chunks in this data space
b) With the consideration of the user option, calculate IO mode for each chunk
@@ -1628,7 +1628,7 @@ done:
in order to do MPI Bcast only once
d) MPI Bcast the IO mode and chunk address information for each chunk.
4) Each process then retrieves IO mode and chunk address information to assign_io_mode and chunk_addr.
-
+
Parameters:
Input: H5D_io_info_t* io_info,
@@ -1645,8 +1645,8 @@ done:
*-------------------------------------------------------------------------
*/
-static herr_t
-H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
+static herr_t
+H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
fm_map *fm,
uint8_t assign_io_mode[],
haddr_t chunk_addr[])
@@ -1661,7 +1661,7 @@ H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
uint8_t* mergebuf=NULL;
uint8_t* tempbuf;
- H5SL_node_t* chunk_node;
+ H5SL_node_t* chunk_node;
H5D_chunk_info_t* chunk_info;
MPI_Datatype bastype[2];
@@ -1688,7 +1688,7 @@ H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
FUNC_ENTER_NOAPI_NOINIT(H5D_obtain_mpio_mode)
/* Assign the rank 0 to the root */
- root = 0;
+ root = 0;
comm = io_info->comm;
/* Obtain the number of process and the current rank of the process */
@@ -1696,7 +1696,7 @@ H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
HGOTO_ERROR (H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi rank");
if((mpi_size = H5F_mpi_get_size(io_info->dset->oloc.file))<0)
HGOTO_ERROR (H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi size");
-
+
/* Allocate memory */
ori_total_chunks = fm->total_chunks;
H5_ASSIGN_OVERFLOW(total_chunks,ori_total_chunks,hsize_t,int);
@@ -1704,30 +1704,30 @@ H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
/* Obtain the data transfer properties */
if(NULL == (plist = H5I_object(io_info->dxpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file access property list")
-
+
percent_nproc_per_chunk=H5P_peek_unsigned(plist,H5D_XFER_MPIO_CHUNK_OPT_RATIO_NAME);
#if defined(H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS) && defined(H5_MPI_SPECIAL_COLLECTIVE_IO_WORKS)
-
+
chunk_opt_mode=(H5FD_mpio_chunk_opt_t)H5P_peek_unsigned(plist,H5D_XFER_MPIO_CHUNK_OPT_HARD_NAME);
if((chunk_opt_mode == H5FD_MPIO_CHUNK_MULTI_IO) || (percent_nproc_per_chunk == 0)){
if(H5D_istore_chunkmap(io_info,total_chunks,chunk_addr,fm->down_chunks)<0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address");
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address");
for(ic = 0; ic<total_chunks;ic++)
assign_io_mode[ic] = H5D_CHUNK_IO_MODE_COL;
goto done;
}
-#endif
+#endif
threshold_nproc_per_chunk = mpi_size * percent_nproc_per_chunk/100;
io_mode_info = (uint8_t *)H5MM_calloc(total_chunks*sizeof(MPI_BYTE));
mergebuf = H5MM_malloc((sizeof(haddr_t)+sizeof(MPI_BYTE))*total_chunks);
tempbuf = mergebuf + sizeof(MPI_BYTE)*total_chunks;
- if(mpi_rank == root)
+ if(mpi_rank == root)
recv_io_mode_info = (uint8_t *)H5MM_malloc(total_chunks*sizeof(MPI_BYTE)*mpi_size);
-
+
mem_cleanup = 1;
chunk_node = H5SL_first(fm->fsel);
@@ -1750,7 +1750,7 @@ H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
chunk_node = H5SL_next(chunk_node);
}
-
+
/*Create sent MPI derived datatype */
if(MPI_SUCCESS !=(mpi_code = MPI_Type_contiguous(total_chunks,MPI_BYTE,&stype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Comm_rank failed", mpi_code);
@@ -1764,7 +1764,7 @@ H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
basdisp[0] = 0;
basdisp[1] = (MPI_Aint)(sizeof(MPI_BYTE)*total_chunks);/* may need to check overflow */
bastype[0] = MPI_BYTE;
-
+
if(MPI_SUCCESS !=(mpi_code = MPI_Type_contiguous(sizeof(haddr_t),MPI_BYTE,&chunk_addrtype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code);
if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&chunk_addrtype)))
@@ -1792,7 +1792,7 @@ H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
int* ind_this_chunk;
#endif
- /* pre-computing: calculate number of processes and
+ /* pre-computing: calculate number of processes and
regularity of the selection occupied in each chunk */
nproc_per_chunk = (int*)H5MM_calloc(total_chunks*sizeof(int));
#if !defined(H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS) || !defined(H5_MPI_SPECIAL_COLLECTIVE_IO_WORKS)
@@ -1866,7 +1866,7 @@ H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
new_value = 0;
if(H5Pset(io_info->dxpl_id,H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME,&new_value)<0)
HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to set property value");
-#else
+#else
for(ic = 0; ic < total_chunks; ic++){
if(assign_io_mode[ic] == H5D_CHUNK_IO_MODE_COL) {
new_value = 0;
@@ -1893,7 +1893,7 @@ H5D_obtain_mpio_mode(H5D_io_info_t* io_info,
}
}
#endif
-
+
done:
if(mpi_type_cleanup) {
@@ -1910,7 +1910,7 @@ done:
if(mem_cleanup){
HDfree(io_mode_info);
HDfree(mergebuf);
- if(mpi_rank == root)
+ if(mpi_rank == root)
HDfree(recv_io_mode_info);
}
@@ -1923,7 +1923,7 @@ H5D_cmp_chunk_addr(const void *chunk_addr_info1, const void *chunk_addr_info2)
haddr_t addr1, addr2;
FUNC_ENTER_NOAPI_NOINIT(H5D_cmp_chunk_addr)
-
+
addr1 = ((const H5D_chunk_addr_info_t *)chunk_addr_info1)->chunk_addr;
addr2 = ((const H5D_chunk_addr_info_t *)chunk_addr_info2)->chunk_addr;
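H5D_obtain_mpio_mode, described earlier in this file's diff, follows a gather-decide-broadcast pattern: each process reports which chunks it selects, the root counts contributing processes per chunk and assigns a collective or independent mode, and the decision is broadcast back. The sketch below reproduces only that communication skeleton with an invented buffer layout; the real routine also packs chunk addresses into the broadcast and uses derived datatypes for the merged buffer.

    #include <mpi.h>
    #include <stdlib.h>

    /* Gather one byte per chunk from every rank (1 = selected), let the root
     * assign an I/O mode per chunk, then broadcast the decision to everyone.
     * (Error handling omitted for brevity.) */
    static int assign_chunk_io_mode(MPI_Comm comm, int total_chunks,
                                    unsigned char selected[],  /* [total_chunks] */
                                    unsigned char io_mode[],   /* [total_chunks] */
                                    int min_procs_for_collective)
    {
        int rank, size, c, r;
        unsigned char *all = NULL;

        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);
        if(rank == 0)
            all = (unsigned char *)malloc((size_t)size * (size_t)total_chunks);

        MPI_Gather(selected, total_chunks, MPI_UNSIGNED_CHAR,
                   all, total_chunks, MPI_UNSIGNED_CHAR, 0, comm);

        if(rank == 0) {
            for(c = 0; c < total_chunks; c++) {
                int nproc = 0;
                for(r = 0; r < size; r++)
                    nproc += all[(size_t)r * total_chunks + c];
                io_mode[c] = (nproc >= min_procs_for_collective) ? 1 : 0;
            }
            free(all);
        }

        MPI_Bcast(io_mode, total_chunks, MPI_UNSIGNED_CHAR, 0, comm);
        return 0;
    }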
diff --git a/src/H5Doh.c b/src/H5Doh.c
index 4da2381..ff42659 100644
--- a/src/H5Doh.c
+++ b/src/H5Doh.c
@@ -173,7 +173,7 @@ H5O_dset_free_copy_file_udata(void *_udata)
H5T_close(udata->src_dtype);
/* Release copy of dataset's filter pipeline, if it was set */
- if (udata->src_pline)
+ if (udata->src_pline)
H5O_free(H5O_PLINE_ID, udata->src_pline);
/* Release space for 'copy file' user data */
diff --git a/src/H5E.c b/src/H5E.c
index f7519bb..a891ee8 100644
--- a/src/H5E.c
+++ b/src/H5E.c
@@ -108,7 +108,7 @@ static ssize_t H5E_get_num(const H5E_t *err_stack);
static herr_t H5E_pop(H5E_t *err_stack, size_t count);
static herr_t H5E_clear_entries(H5E_t *estack, size_t nentries);
static herr_t H5E_print_stack(const H5E_t *estack, FILE *stream, hbool_t bk_compatible);
-static herr_t H5E_walk_stack(const H5E_t *estack, H5E_direction_t direction, H5E_walk_t func,
+static herr_t H5E_walk_stack(const H5E_t *estack, H5E_direction_t direction, H5E_walk_t func,
H5E_walk_stack_t stack_func, hbool_t bk_compatible, void *client_data);
static herr_t H5E_walk_cb(unsigned n, const H5E_error_t *err_desc, void *client_data);
static herr_t H5E_walk_stack_cb(unsigned n, const H5E_error_stack_t *err_desc, void *client_data);
@@ -1991,7 +1991,7 @@ H5E_print_stack(const H5E_t *estack, FILE *stream, hbool_t bk_compatible)
if(bk_compatible) {
if(H5E_walk_stack(estack, H5E_WALK_DOWNWARD, H5E_walk_cb, NULL, TRUE, (void*)&eprint)<0)
HGOTO_ERROR(H5E_ERROR, H5E_CANTLIST, FAIL, "can't walk error stack")
- } else {
+ } else {
if(H5E_walk_stack(estack, H5E_WALK_DOWNWARD, NULL, H5E_walk_stack_cb, FALSE, (void*)&eprint)<0)
HGOTO_ERROR(H5E_ERROR, H5E_CANTLIST, FAIL, "can't walk error stack")
}
@@ -2102,16 +2102,16 @@ done:
* means to start at the API and end at the inner-most function
* where the error was first detected.
*
- * The function pointed to by STACK_FUNC will be called for
- * each error record in the error stack. It's arguments will
- * include an index number (beginning at zero regardless of
- * stack traversal direction), an error stack entry, and the
+ * The function pointed to by STACK_FUNC will be called for
+ * each error record in the error stack. Its arguments will
+ * include an index number (beginning at zero regardless of
+ * stack traversal direction), an error stack entry, and the
* CLIENT_DATA pointer passed to H5E_print_stack.
*
* The function FUNC is also provided for backward compatibility.
* When BK_COMPATIBLE is set to be TRUE, FUNC is used to be
* compatible with older library. If BK_COMPATIBLE is FALSE,
- * STACK_FUNC is used.
+ * STACK_FUNC is used.
*
* Return: Non-negative on success/Negative on failure
*
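
For illustration, an application-defined walker matching the H5E_walk_stack_t callback shape used in this file might look like the sketch below; the H5E_error_stack_t field names it touches (func_name, desc) are assumptions and should be checked against H5Epublic.h in this development branch.

    /* Sketch of a custom error-stack walk callback (field names assumed). */
    #include <stdio.h>
    #include "hdf5.h"

    static herr_t
    my_walk_cb(unsigned n, const H5E_error_stack_t *err_desc, void *client_data)
    {
        FILE *stream = (FILE *)client_data;   /* the CLIENT_DATA pointer passed to the walk */

        /* n starts at zero regardless of traversal direction */
        fprintf(stream, "  #%03u: %s: %s\n", n, err_desc->func_name, err_desc->desc);

        return 0;   /* a non-negative return keeps the traversal going */
    }
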
@@ -2126,13 +2126,13 @@ done:
*
* Raymond Lu
* Friday, May 12, 2006
- * Added backward compatibility support. FUNC is for older
+ * Added backward compatibility support. FUNC is for older
* library; STACK_FUNC is for new library.
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5E_walk_stack(const H5E_t *estack, H5E_direction_t direction, H5E_walk_t func, H5E_walk_stack_t stack_func,
+H5E_walk_stack(const H5E_t *estack, H5E_direction_t direction, H5E_walk_t func, H5E_walk_stack_t stack_func,
hbool_t bk_compatible, void *client_data)
{
int i; /* Local index variable */
@@ -2216,9 +2216,9 @@ done:
* Purpose: This is a default error stack traversal callback function
* that prints error messages to the specified output stream.
* It is not meant to be called directly but rather as an
- * argument to the H5Ewalk_stack() function. This function is
- * called also by H5Eprint_stack(). Application writers are
- * encouraged to use this function as a model for their own
+ * argument to the H5Ewalk_stack() function. This function is
+ * called also by H5Eprint_stack(). Application writers are
+ * encouraged to use this function as a model for their own
* error stack walking functions.
*
* N is a counter for how many times this function has been
@@ -2351,7 +2351,7 @@ H5E_walk_stack_cb(unsigned n, const H5E_error_stack_t *err_desc, void *client_da
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Raymond Lu
+ * Programmer: Raymond Lu
* Thursday, May 11, 2006
*
* Modifications:
diff --git a/src/H5Eterm.h b/src/H5Eterm.h
index c2c206e..0b3a85e 100644
--- a/src/H5Eterm.h
+++ b/src/H5Eterm.h
@@ -20,180 +20,180 @@
#define _H5Eterm_H
/* Reset major error IDs */
-
-H5E_DATASET_g=
-H5E_FUNC_g=
-H5E_STORAGE_g=
-H5E_FILE_g=
-H5E_SYM_g=
-H5E_VFL_g=
-H5E_INTERNAL_g=
-H5E_BTREE_g=
-H5E_REFERENCE_g=
-H5E_DATASPACE_g=
-H5E_RESOURCE_g=
-H5E_PLIST_g=
-H5E_DATATYPE_g=
-H5E_RS_g=
-H5E_HEAP_g=
-H5E_OHDR_g=
-H5E_ATOM_g=
-H5E_ATTR_g=
-H5E_NONE_MAJOR_g=
-H5E_IO_g=
-H5E_SLIST_g=
-H5E_EFL_g=
-H5E_TST_g=
-H5E_ARGS_g=
-H5E_ERROR_g=
-H5E_PLINE_g=
-H5E_FSPACE_g=
+
+H5E_DATASET_g=
+H5E_FUNC_g=
+H5E_STORAGE_g=
+H5E_FILE_g=
+H5E_SYM_g=
+H5E_VFL_g=
+H5E_INTERNAL_g=
+H5E_BTREE_g=
+H5E_REFERENCE_g=
+H5E_DATASPACE_g=
+H5E_RESOURCE_g=
+H5E_PLIST_g=
+H5E_DATATYPE_g=
+H5E_RS_g=
+H5E_HEAP_g=
+H5E_OHDR_g=
+H5E_ATOM_g=
+H5E_ATTR_g=
+H5E_NONE_MAJOR_g=
+H5E_IO_g=
+H5E_SLIST_g=
+H5E_EFL_g=
+H5E_TST_g=
+H5E_ARGS_g=
+H5E_ERROR_g=
+H5E_PLINE_g=
+H5E_FSPACE_g=
H5E_CACHE_g= (-1);
/* Reset minor error IDs */
-/* Generic low-level file I/O errors */
-H5E_SEEKERROR_g=
-H5E_READERROR_g=
-H5E_WRITEERROR_g=
-H5E_CLOSEERROR_g=
-H5E_OVERFLOW_g=
+/* Generic low-level file I/O errors */
+H5E_SEEKERROR_g=
+H5E_READERROR_g=
+H5E_WRITEERROR_g=
+H5E_CLOSEERROR_g=
+H5E_OVERFLOW_g=
H5E_FCNTL_g=
-/* Resource errors */
-H5E_NOSPACE_g=
-H5E_CANTALLOC_g=
-H5E_CANTCOPY_g=
-H5E_CANTFREE_g=
-H5E_ALREADYEXISTS_g=
-H5E_CANTLOCK_g=
-H5E_CANTUNLOCK_g=
-H5E_CANTGC_g=
+/* Resource errors */
+H5E_NOSPACE_g=
+H5E_CANTALLOC_g=
+H5E_CANTCOPY_g=
+H5E_CANTFREE_g=
+H5E_ALREADYEXISTS_g=
+H5E_CANTLOCK_g=
+H5E_CANTUNLOCK_g=
+H5E_CANTGC_g=
H5E_CANTGETSIZE_g=
-/* Heap errors */
-H5E_CANTRESTORE_g=
-H5E_CANTCOMPUTE_g=
-H5E_CANTEXTEND_g=
-H5E_CANTATTACH_g=
+/* Heap errors */
+H5E_CANTRESTORE_g=
+H5E_CANTCOMPUTE_g=
+H5E_CANTEXTEND_g=
+H5E_CANTATTACH_g=
H5E_CANTUPDATE_g=
-/* Function entry/exit interface errors */
-H5E_CANTINIT_g=
-H5E_ALREADYINIT_g=
+/* Function entry/exit interface errors */
+H5E_CANTINIT_g=
+H5E_ALREADYINIT_g=
H5E_CANTRELEASE_g=
-/* Property list errors */
-H5E_CANTGET_g=
-H5E_CANTSET_g=
+/* Property list errors */
+H5E_CANTGET_g=
+H5E_CANTSET_g=
H5E_DUPCLASS_g=
-/* Free space errors */
-H5E_CANTMERGE_g=
-H5E_CANTREVIVE_g=
+/* Free space errors */
+H5E_CANTMERGE_g=
+H5E_CANTREVIVE_g=
H5E_CANTSHRINK_g=
-/* Object header related errors */
-H5E_LINKCOUNT_g=
-H5E_VERSION_g=
-H5E_ALIGNMENT_g=
-H5E_BADMESG_g=
-H5E_CANTDELETE_g=
-H5E_BADITER_g=
+/* Object header related errors */
+H5E_LINKCOUNT_g=
+H5E_VERSION_g=
+H5E_ALIGNMENT_g=
+H5E_BADMESG_g=
+H5E_CANTDELETE_g=
+H5E_BADITER_g=
H5E_CANTPACK_g=
-/* System level errors */
+/* System level errors */
H5E_SYSERRSTR_g=
-/* I/O pipeline errors */
-H5E_NOFILTER_g=
-H5E_CALLBACK_g=
-H5E_CANAPPLY_g=
-H5E_SETLOCAL_g=
+/* I/O pipeline errors */
+H5E_NOFILTER_g=
+H5E_CALLBACK_g=
+H5E_CANAPPLY_g=
+H5E_SETLOCAL_g=
H5E_NOENCODER_g=
-/* Group related errors */
-H5E_CANTOPENOBJ_g=
-H5E_CANTCLOSEOBJ_g=
-H5E_COMPLEN_g=
-H5E_LINK_g=
-H5E_SLINK_g=
+/* Group related errors */
+H5E_CANTOPENOBJ_g=
+H5E_CANTCLOSEOBJ_g=
+H5E_COMPLEN_g=
+H5E_LINK_g=
+H5E_SLINK_g=
H5E_PATH_g=
-/* No error */
+/* No error */
H5E_NONE_MINOR_g=
-/* File accessability errors */
-H5E_FILEEXISTS_g=
-H5E_FILEOPEN_g=
-H5E_CANTCREATE_g=
-H5E_CANTOPENFILE_g=
-H5E_CANTCLOSEFILE_g=
-H5E_NOTHDF5_g=
-H5E_BADFILE_g=
-H5E_TRUNCATED_g=
+/* File accessibility errors */
+H5E_FILEEXISTS_g=
+H5E_FILEOPEN_g=
+H5E_CANTCREATE_g=
+H5E_CANTOPENFILE_g=
+H5E_CANTCLOSEFILE_g=
+H5E_NOTHDF5_g=
+H5E_BADFILE_g=
+H5E_TRUNCATED_g=
H5E_MOUNT_g=
-/* Object atom related errors */
-H5E_BADATOM_g=
-H5E_BADGROUP_g=
-H5E_CANTREGISTER_g=
-H5E_CANTINC_g=
-H5E_CANTDEC_g=
+/* Object atom related errors */
+H5E_BADATOM_g=
+H5E_BADGROUP_g=
+H5E_CANTREGISTER_g=
+H5E_CANTINC_g=
+H5E_CANTDEC_g=
H5E_NOIDS_g=
-/* Cache related errors */
-H5E_CANTFLUSH_g=
-H5E_CANTSERIALIZE_g=
-H5E_CANTLOAD_g=
-H5E_PROTECT_g=
-H5E_NOTCACHED_g=
-H5E_SYSTEM_g=
-H5E_CANTINS_g=
-H5E_CANTRENAME_g=
-H5E_CANTPROTECT_g=
-H5E_CANTUNPROTECT_g=
-H5E_CANTPIN_g=
-H5E_CANTUNPIN_g=
-H5E_CANTMARKDIRTY_g=
+/* Cache related errors */
+H5E_CANTFLUSH_g=
+H5E_CANTSERIALIZE_g=
+H5E_CANTLOAD_g=
+H5E_PROTECT_g=
+H5E_NOTCACHED_g=
+H5E_SYSTEM_g=
+H5E_CANTINS_g=
+H5E_CANTRENAME_g=
+H5E_CANTPROTECT_g=
+H5E_CANTUNPROTECT_g=
+H5E_CANTPIN_g=
+H5E_CANTUNPIN_g=
+H5E_CANTMARKDIRTY_g=
H5E_CANTDIRTY_g=
-/* Parallel MPI errors */
-H5E_MPI_g=
-H5E_MPIERRSTR_g=
+/* Parallel MPI errors */
+H5E_MPI_g=
+H5E_MPIERRSTR_g=
H5E_CANTRECV_g=
-/* Dataspace errors */
-H5E_CANTCLIP_g=
-H5E_CANTCOUNT_g=
-H5E_CANTSELECT_g=
-H5E_CANTNEXT_g=
-H5E_BADSELECT_g=
+/* Dataspace errors */
+H5E_CANTCLIP_g=
+H5E_CANTCOUNT_g=
+H5E_CANTSELECT_g=
+H5E_CANTNEXT_g=
+H5E_BADSELECT_g=
H5E_CANTCOMPARE_g=
-/* B-tree related errors */
-H5E_NOTFOUND_g=
-H5E_EXISTS_g=
-H5E_CANTENCODE_g=
-H5E_CANTDECODE_g=
-H5E_CANTSPLIT_g=
-H5E_CANTREDISTRIBUTE_g=
-H5E_CANTSWAP_g=
-H5E_CANTINSERT_g=
-H5E_CANTLIST_g=
-H5E_CANTMODIFY_g=
+/* B-tree related errors */
+H5E_NOTFOUND_g=
+H5E_EXISTS_g=
+H5E_CANTENCODE_g=
+H5E_CANTDECODE_g=
+H5E_CANTSPLIT_g=
+H5E_CANTREDISTRIBUTE_g=
+H5E_CANTSWAP_g=
+H5E_CANTINSERT_g=
+H5E_CANTLIST_g=
+H5E_CANTMODIFY_g=
H5E_CANTREMOVE_g=
-/* Argument errors */
-H5E_UNINITIALIZED_g=
-H5E_UNSUPPORTED_g=
-H5E_BADTYPE_g=
-H5E_BADRANGE_g=
+/* Argument errors */
+H5E_UNINITIALIZED_g=
+H5E_UNSUPPORTED_g=
+H5E_BADTYPE_g=
+H5E_BADRANGE_g=
H5E_BADVALUE_g=
-/* Datatype conversion errors */
-H5E_CANTCONVERT_g=
+/* Datatype conversion errors */
+H5E_CANTCONVERT_g=
H5E_BADSIZE_g= (-1);
#endif /* H5Eterm_H */
diff --git a/src/H5F.c b/src/H5F.c
index 0d80e51..24f4e9c 100644
--- a/src/H5F.c
+++ b/src/H5F.c
@@ -1428,8 +1428,8 @@ done:
* property list changes.
*
* J Mainzer, Jun 30, 2005
- * Added lf parameter so the shared->lf field can be
- * initialized prior to the call to H5AC_create() if a
+ * Added lf parameter so the shared->lf field can be
+ * initialized prior to the call to H5AC_create() if a
* new instance of H5F_file_t is created. lf should be
* NULL if shared isn't, and vise versa.
*
@@ -1458,7 +1458,7 @@ H5F_new(H5F_file_t *shared, hid_t fcpl_id, hid_t fapl_id, H5FD_t *lf)
f->shared->freespace_addr = HADDR_UNDEF;
f->shared->driver_addr = HADDR_UNDEF;
f->shared->lf = lf;
-
+
/*
* Copy the file creation and file access property lists into the
* new file handle. We do this early because some values might need
diff --git a/src/H5FDfamily.c b/src/H5FDfamily.c
index 0e4a453..c826163 100644
--- a/src/H5FDfamily.c
+++ b/src/H5FDfamily.c
@@ -60,7 +60,7 @@ typedef struct H5FD_family_t {
hid_t memb_fapl_id; /*file access property list for members */
hsize_t memb_size; /*actual size of each member file */
hsize_t pmem_size; /*member size passed in from property */
- hsize_t mem_newsize; /*new member size passed in as private
+ hsize_t mem_newsize; /*new member size passed in as private
*property. It's used only by h5repart */
unsigned nmembs; /*number of family members */
unsigned amembs; /*number of member slots allocated */
diff --git a/src/H5FDmpi.h b/src/H5FDmpi.h
index b7b4162..6c2a2c5 100644
--- a/src/H5FDmpi.h
+++ b/src/H5FDmpi.h
@@ -21,8 +21,8 @@
#ifndef H5FDmpi_H
#define H5FDmpi_H
-/***** Macros for One linked collective IO case. *****/
-/* The default value to do one linked collective IO for all chunks.
+/***** Macros for One linked collective IO case. *****/
+/* The default value to do one linked collective IO for all chunks.
If the average number of chunks per process is greater than this value,
the library will create an MPI derived datatype to link all chunks to do collective IO.
The user can set this value through an API. */
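
As a rough illustration of the mechanism described above (a conceptual sketch, not the library's internal code), linking the file locations of several chunks into one MPI derived datatype lets a single collective call cover all of them; the helper name and arguments below are invented for the example.

    #include <mpi.h>

    /* Conceptual sketch: one hindexed datatype describes every chunk's
     * place in the file, so one collective write services all chunks. */
    static void write_linked_chunks(MPI_File fh, void *buf, int nchunks,
                                    int blocklens[], MPI_Aint file_offsets[])
    {
        MPI_Datatype ftype;
        MPI_Status   status;
        int          i, total = 0;

        for (i = 0; i < nchunks; i++)
            total += blocklens[i];            /* total bytes this process writes */

        MPI_Type_create_hindexed(nchunks, blocklens, file_offsets, MPI_BYTE, &ftype);
        MPI_Type_commit(&ftype);

        /* Every process sets its view, then all issue a single collective write */
        MPI_File_set_view(fh, 0, MPI_BYTE, ftype, "native", MPI_INFO_NULL);
        MPI_File_write_all(fh, buf, total, MPI_BYTE, &status);

        MPI_Type_free(&ftype);
    }
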
diff --git a/src/H5FDmpio.c b/src/H5FDmpio.c
index f639182..0be55e3 100644
--- a/src/H5FDmpio.c
+++ b/src/H5FDmpio.c
@@ -543,14 +543,14 @@ Description:
The library won't behave as it asks for only when we find
that the low-level MPI-IO package doesn't support this.
-Parameters:
+Parameters:
hid_t dxpl_id in: Data transfer property list identifier
H5FD_mpio_chunk_opt_t in: The optimization flag for linked chunk IO
or multi-chunk IO.
-
-Returns:
-Returns a non-negative value if successful. Otherwise returns a negative value.
+
+Returns:
+Returns a non-negative value if successful. Otherwise returns a negative value.
*
*-------------------------------------------------------------------------
*/
@@ -590,15 +590,15 @@ Purpose:
To set a threshold for doing linked chunk IO
Description:
- If the number is greater than the threshold set by the user,
+   If the average number of chunks per process is greater than the threshold set by the user,
the library will do linked chunk IO; otherwise, IO will be done for every chunk.
-Parameters:
+Parameters:
hid_t dxpl_id in: Data transfer property list identifier
- unsigned num_proc_per_chunk in: the threshold of the average number of chunks selected by each process
+ unsigned num_proc_per_chunk in: the threshold of the average number of chunks selected by each process
-Returns:
-Returns a non-negative value if successful. Otherwise returns a negative value.
+Returns:
+Returns a non-negative value if successful. Otherwise returns a negative value.
*
*-------------------------------------------------------------------------
*/
@@ -637,13 +637,13 @@ Purpose:
To set a threshold for doing collective IO for each chunk
Description:
The library will calculate the percentage of the number of process holding selections at each chunk. If that percentage of number of process in the individual chunk is greater than the threshold set by the user, the library will do collective chunk IO for this chunk; otherwise, independent IO will be done for this chunk.
-Parameters:
- hid_t dxpl_id
+Parameters:
+ hid_t dxpl_id
in: Data transfer property list identifier
- unsigned percent_num_proc_per_chunk
+ unsigned percent_num_proc_per_chunk
in: the threshold of the percentage of the number of process holding selections per chunk
-Returns:
-Returns a non-negative value if successful. Otherwise returns a negative value.
+Returns:
+Returns a non-negative value if successful. Otherwise returns a negative value.
*
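
For reference, the three transfer properties documented above correspond to dataset-transfer property list setters in released parallel HDF5 builds; the sketch below assumes those public names (H5Pset_dxpl_mpio_chunk_opt, H5Pset_dxpl_mpio_chunk_opt_num, H5Pset_dxpl_mpio_chunk_opt_ratio) and uses illustrative threshold values.

    #include "hdf5.h"

    /* Build a collective transfer property list with the chunk-IO
     * heuristics described above (threshold values are examples only). */
    static hid_t make_chunk_opt_dxpl(void)
    {
        hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

        H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);             /* collective transfer          */
        H5Pset_dxpl_mpio_chunk_opt(dxpl, H5FD_MPIO_CHUNK_ONE_IO); /* prefer one linked-chunk IO   */
        H5Pset_dxpl_mpio_chunk_opt_num(dxpl, 4);                  /* link if >= 4 chunks/process  */
        H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl, 60);               /* collective if >= 60% of procs
                                                                     select a given chunk         */
        return dxpl;   /* pass to H5Dread()/H5Dwrite(), close with H5Pclose() */
    }
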
@@ -1701,7 +1701,7 @@ H5FD_mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr,
/* The metadata cache now only writes from process 0, which makes
* this synchronization incorrect. I'm leaving this code commented
* out instead of deleting it to remind us that we should re-write
- * this function so that a metadata write from any other process
+ * this function so that a metadata write from any other process
* should flag an error.
* -- JRM 9/1/05
*/
@@ -1772,13 +1772,13 @@ done:
#if 0 /* JRM */
/* Since metadata writes are now done by process 0 only, this broadcast
- * is no longer needed. I leave it in and commented out to remind us
+ * is no longer needed. I leave it in and commented out to remind us
* that we need to re-work this function to reflect this reallity.
*
* -- JRM 9/1/05
*/
- /* if only one process writes, need to broadcast the ret_value to
- * other processes
+ /* if only one process writes, need to broadcast the ret_value to
+ * other processes
*/
if (type!=H5FD_MEM_DRAW) {
if (MPI_SUCCESS != (mpi_code=MPI_Bcast(&ret_value, sizeof(ret_value), MPI_BYTE, H5_PAR_META_WRITE, file->comm)))
diff --git a/src/H5FDmpiposix.c b/src/H5FDmpiposix.c
index de491f0..2809539 100644
--- a/src/H5FDmpiposix.c
+++ b/src/H5FDmpiposix.c
@@ -913,7 +913,7 @@ done:
* John Mainzer -- 9/21/05
* Modified code to turn off the
* H5FD_FEAT_ACCUMULATE_METADATA_WRITE flag.
- * With the movement of all cache writes to process 0,
+ * With the movement of all cache writes to process 0,
* this flag has become problematic in PHDF5.
*
*-------------------------------------------------------------------------
@@ -1233,10 +1233,10 @@ H5FD_mpiposix_write(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr,
/* Metadata specific actions */
/* All metadata is now written from process 0 -- thus this function
- * needs to be re-written to reflect this. For now I have simply
- * commented out the code that attempts to synchronize metadata
+ * needs to be re-written to reflect this. For now I have simply
+ * commented out the code that attempts to synchronize metadata
* writes between processes, but we should really just flag an error
- * whenever any process other than process 0 attempts to write
+ * whenever any process other than process 0 attempts to write
* metadata.
* -- JRM 9/1/05
*/
@@ -1394,7 +1394,7 @@ H5FD_mpiposix_flush(H5FD_t *_file, hid_t UNUSED dxpl_id, unsigned UNUSED closing
if(file->mpi_rank == H5_PAR_META_WRITE) {
#ifdef WIN32
/* Map the posix file handle to a Windows file handle */
- filehandle = _get_osfhandle(file->fd);
+ filehandle = _get_osfhandle(file->fd);
/* Translate 64-bit integers into form Windows wants */
/* [This algorithm is from the Windows documentation for SetFilePointer()] */
diff --git a/src/H5FDmulti.c b/src/H5FDmulti.c
index 652bd53..e7de49f 100644
--- a/src/H5FDmulti.c
+++ b/src/H5FDmulti.c
@@ -1631,7 +1631,7 @@ H5FD_multi_alloc(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, hsize_t size)
if ( addr + size > file->eoa ) {
if ( H5FD_multi_set_eoa(_file, addr + size) < 0 ) {
-
+
H5Epush_ret(func, H5E_ERR_CLS, H5E_INTERNAL, H5E_BADVALUE, \
"can't set eoa", HADDR_UNDEF)
}
diff --git a/src/H5FS.c b/src/H5FS.c
index 87fb4dd..5094405 100644
--- a/src/H5FS.c
+++ b/src/H5FS.c
@@ -1342,7 +1342,7 @@ HDfprintf(stderr, "%s: sect->size = %Hu, sect->addr = %a, sect->type = %u\n", FU
#ifdef QAK
HDfprintf(stderr, "%s: Returning space\n", FUNC);
#endif /* QAK */
-
+
/* Attempt to merge returned section with existing sections */
if(H5FS_sect_merge(f, dxpl_id, fspace, &sect, op_data) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTMERGE, FAIL, "can't merge sections")
diff --git a/src/H5FSprivate.h b/src/H5FSprivate.h
index 281bb57..4ce9d88 100644
--- a/src/H5FSprivate.h
+++ b/src/H5FSprivate.h
@@ -117,7 +117,7 @@ typedef herr_t (*H5FS_operator_t)(const H5FS_section_info_t *sect,
/*****************************/
/* Library-private Variables */
/*****************************/
-
+
/* Declare a free list to manage the H5FS_section_class_t sequence information */
H5FL_SEQ_EXTERN(H5FS_section_class_t);
diff --git a/src/H5G.c b/src/H5G.c
index 34d3c56..c3e300a 100644
--- a/src/H5G.c
+++ b/src/H5G.c
@@ -292,9 +292,9 @@ done:
*
* Usage: H5Gcreate_expand(loc_id, char *name, gcpl_id, gapl_id)
* hid_t loc_id; IN: File or group identifier
- * const char *name; IN: Absolute or relative name of the new group
- * hid_t gcpl_id; IN: Property list for group creation
- * hid_t gapl_id; IN: Property list for group access
+ * const char *name; IN: Absolute or relative name of the new group
+ * hid_t gcpl_id; IN: Property list for group creation
+ * hid_t gapl_id; IN: Property list for group access
*
* Example: To create missing groups "A" and "B01" along the given path "/A/B01/grp"
* hid_t create_id = H5Pcreate(H5P_GROUP_CREATE);
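
A minimal sketch of that usage, assuming H5Pset_create_intermediate_group() is the setter paired with the H5P_GROUP_CREATE property list in this development branch; the file handle and path are illustrative.

    #include "hdf5.h"

    /* Create /A/B01/grp, asking the library to create the missing
     * parents "A" and "B01" along the way (setter name assumed). */
    static hid_t create_with_missing_parents(hid_t file_id)
    {
        hid_t create_id = H5Pcreate(H5P_GROUP_CREATE);
        hid_t grp;

        H5Pset_create_intermediate_group(create_id, 1);
        grp = H5Gcreate_expand(file_id, "/A/B01/grp", create_id, H5P_DEFAULT);

        H5Pclose(create_id);
        return grp;
    }
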
@@ -1066,23 +1066,23 @@ done:
/*-------------------------------------------------------------------------
* Function: H5Gcopy
*
- * Purpose: Copy an object (group or dataset) to destination location
+ * Purpose: Copy an object (group or dataset) to destination location
* within a file or cross files. PLIST_ID is a property list
* which is used to pass user options and properties to the
- * copy.
+ * copy.
*
* OPTIONS THAT MAY APPLY TO COPY IN THE FUTURE.
* H5G_COPY_CREATE_INTERMEDIATE_GROUP_FLAG
* Do not create missing groups when create a group (default)
* Create missing groups when create a group
* H5G_COPY_SHALLOW_HIERARCHY_FLAG
- * Recursively copy all objects below the group (default)
+ * Recursively copy all objects below the group (default)
* Only immediate members.
* H5G_COPY_EXPAND_SOFT_LINK_FLAG
- * Keep soft links as they are (default)
+ * Keep soft links as they are (default)
* Expand them into new objects
* H5G_COPY_EXPAND_EXT_LINK_FLAG
- * Keep external links as they are (default)
+ * Keep external links as they are (default)
* Expand them into new objects
* H5G_COPY_EXPAND_OBJ_REFERENCE_FLAG
* Update only the values of object references (default)
@@ -1094,26 +1094,26 @@ done:
* PROPERTIES THAT MAY APPLY TO COPY IN FUTURE
* Change data layout such as chunk size
* Add filter such as data compression.
- * Add an attribute to the copied object(s) that say the date/time
+ *              Add an attribute to the copied object(s) that says the date/time
* for the copy or other information about the source file.
*
* Usage: H5Gcopy(src_loc_id, src_name, dst_loc_id, dst_name, plist_id)
- * hid_t src_loc_id IN: Source file or group identifier.
+ * hid_t src_loc_id IN: Source file or group identifier.
* const char *src_name IN: Name of the source object to be copied
- * hid_t dst_loc_id IN: Destination file or group identifier
- * const char *dst_name IN: Name of the destination object
- * hid_t plist_id IN: Properties which apply to the copy
- *
+ * hid_t dst_loc_id IN: Destination file or group identifier
+ * const char *dst_name IN: Name of the destination object
+ * hid_t plist_id IN: Properties which apply to the copy
+ *
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Peter Cao
- * June 4, 2005
+ * Programmer: Peter Cao
+ * June 4, 2005
*
*-------------------------------------------------------------------------
*/
herr_t
-H5Gcopy(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id,
+H5Gcopy(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id,
const char *dst_name, hid_t plist_id)
{
H5G_loc_t loc; /* Source group group location */
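
A minimal usage sketch built on the signature above; the file handles and object paths are illustrative only.

    #include "hdf5.h"

    /* Copy a group (and, by default, everything below it) from one
     * open file to another, with default copy properties. */
    static herr_t copy_group(hid_t src_file, hid_t dst_file)
    {
        return H5Gcopy(src_file, "/A/B01/grp", dst_file, "/backup/grp", H5P_DEFAULT);
    }
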
@@ -2987,12 +2987,12 @@ H5G_unmount(H5G_t *grp)
/*-------------------------------------------------------------------------
* Function: H5G_copy
*
- * Purpose: Copy an object to destination location
+ * Purpose: Copy an object to destination location
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Peter Cao
- * June 4, 2005
+ * Programmer: Peter Cao
+ * June 4, 2005
*
*-------------------------------------------------------------------------
*/
diff --git a/src/H5Gloc.c b/src/H5Gloc.c
index a9e907f..57212b7 100644
--- a/src/H5Gloc.c
+++ b/src/H5Gloc.c
@@ -37,7 +37,7 @@
/* User data for looking up an object in a group */
typedef struct {
- H5G_loc_t *loc; /* Group location to set */
+ H5G_loc_t *loc; /* Group location to set */
} H5G_loc_ud1_t;
/* Private macros */
diff --git a/src/H5Gname.c b/src/H5Gname.c
index cc4e77b..fdae809 100644
--- a/src/H5Gname.c
+++ b/src/H5Gname.c
@@ -530,7 +530,7 @@ H5G_name_move_path(H5RS_str_t **path_r_ptr, const char *full_suffix, const char
/* Get pointer to path to update */
path = H5RS_get_str(*path_r_ptr);
HDassert(path);
-
+
/* Check if path needs to be updated */
full_suffix_len = HDstrlen(full_suffix);
path_len = HDstrlen(path);
diff --git a/src/H5Gnode.c b/src/H5Gnode.c