author     Scot Breitenfeld <brtnfld@hdfgroup.org>    2022-01-19 15:41:56 (GMT)
committer  GitHub <noreply@github.com>                2022-01-19 15:41:56 (GMT)
commit     5875cb94648b5b0b7a0923306b3084d373f1735f (patch)
tree       58562552e1de458944465c7d990e6c655a8fee17
parent     e72e64b87b77bb1ba32642528f19db1217869547 (diff)
download   hdf5-5875cb94648b5b0b7a0923306b3084d373f1735f.zip
           hdf5-5875cb94648b5b0b7a0923306b3084d373f1735f.tar.gz
           hdf5-5875cb94648b5b0b7a0923306b3084d373f1735f.tar.bz2
spelling corrections (#1382)    (branch: inactive/selection_io_with_subfiling_vfd)
* spelling correction
-rw-r--r--  .github/workflows/codespell.yml                    2
-rw-r--r--  doxygen/examples/DebuggingHDF5Applications.html    2
-rw-r--r--  doxygen/examples/Filters.html                      8
-rw-r--r--  hl/src/H5DS.c                                      4
-rw-r--r--  java/src/hdf/hdf5lib/H5.java                       2
-rw-r--r--  java/src/hdf/hdf5lib/package-info.java             2
-rw-r--r--  m4/aclocal_fc.m4                                   4
-rw-r--r--  release_docs/RELEASE.txt                           4
-rw-r--r--  src/H5FDioc.c                                      2
-rw-r--r--  src/H5FDioc.h                                      2
-rw-r--r--  src/H5FDioc_threads.c                              6
-rw-r--r--  src/H5FDsubfile_int.c                              8
-rw-r--r--  src/H5FDsubfile_mpi.c                              26
-rw-r--r--  src/H5FDsubfiling.c                                24
-rw-r--r--  src/H5FDsubfiling.h                                12
-rw-r--r--  src/H5FDsubfiling_priv.h                           36
-rw-r--r--  src/H5HF.c                                         2
-rw-r--r--  src/H5SL.c                                         2
-rw-r--r--  src/mercury/include/na.h                           2
-rw-r--r--  src/mercury/include/na_sm.h                        2
-rw-r--r--  test/cork.c                                        2
-rw-r--r--  testpar/t_subfiling_vfd.c                          4
-rw-r--r--  testpar/t_vfd.c                                    4
-rw-r--r--  utils/tools/h5dwalk/h5dwalk.1                      2
-rw-r--r--  utils/tools/h5dwalk/h5dwalk.c                      6
25 files changed, 85 insertions, 85 deletions
diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
index 163353e..e294d99 100644
--- a/.github/workflows/codespell.yml
+++ b/.github/workflows/codespell.yml
@@ -12,4 +12,4 @@ jobs:
- uses: codespell-project/actions-codespell@master
with:
skip: ./bin/trace,./hl/tools/h5watch/h5watch.c,./tools/test/h5jam/tellub.c
- ignore_words_list: isnt,inout,nd,parms,parm,ba,offsetP,ser,ois,had,fiter,fo,clude,refere,minnum,offsetp,creat,ans:,eiter,lastr,ans,isn't,ifset,sur,trun,dne,tthe,hda,filname,te,htmp,minnum
+ ignore_words_list: isnt,inout,nd,parms,parm,ba,offsetP,ser,ois,had,fiter,fo,clude,refere,minnum,offsetp,creat,ans:,eiter,lastr,ans,isn't,ifset,sur,trun,dne,tthe,hda,filname,te,htmp,minnum,numer
diff --git a/doxygen/examples/DebuggingHDF5Applications.html b/doxygen/examples/DebuggingHDF5Applications.html
index c6aaf74..3390887 100644
--- a/doxygen/examples/DebuggingHDF5Applications.html
+++ b/doxygen/examples/DebuggingHDF5Applications.html
@@ -173,7 +173,7 @@ IOT Trap, core dumped.
<tr>
<td align="center">mm</td>
<td align="center">Yes</td>
- <td>Library memory managment</td>
+ <td>Library memory management</td>
</tr>
<tr>
<td align="center">o</td>
diff --git a/doxygen/examples/Filters.html b/doxygen/examples/Filters.html
index 2d5bc5e..7054a3b 100644
--- a/doxygen/examples/Filters.html
+++ b/doxygen/examples/Filters.html
@@ -54,7 +54,7 @@
<p>Two types of filters can be applied to raw data I/O: permanent
filters and transient filters. The permanent filter pipeline is
- defned when the dataset is created while the transient pipeline
+ defined when the dataset is created while the transient pipeline
is defined for each I/O operation. During an
<code>H5Dwrite()</code> the transient filters are applied first
in the order defined and then the permanent filters are applied
@@ -211,7 +211,7 @@
should be turned on so such cases can be handled gracefully by
storing the original data instead of the compressed data. The
<em>cd_nvalues</em> should be one with <em>cd_value[0]</em>
- being a compression agression level between zero and nine,
+ being a compression aggression level between zero and nine,
inclusive (zero is the fastest compression while nine results in
the best compression ratio).
@@ -252,7 +252,7 @@
</p><p>The function that acts as the filter always returns zero
(failure) if the <code>md5()</code> function was not detected at
- configuration time (left as an excercise for the reader).
+ configuration time (left as an exercise for the reader).
Otherwise the function is broken down to an input and output
half. The output half calculates a checksum, increases the size
of the output buffer if necessary, and appends the checksum to
@@ -374,7 +374,7 @@
<tr valign="top">
<td>Method</td>
<td>This is the name of the method as defined with
- <code>H5Zregister()</code> with the charaters
+ <code>H5Zregister()</code> with the characters
"&lt; or "&gt;" prepended to indicate
input or output.</td>
</tr>
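
The Filters.html hunks above touch the description of the permanent filter pipeline and the deflate "aggression level" between zero and nine. For readers unfamiliar with that part of the API, here is a minimal sketch, not taken from this patch, that attaches the deflate filter at level 6 to a chunked dataset through the public HDF5 C API; the file name, dataset name, and sizes are arbitrary placeholders.

#include "hdf5.h"

int
main(void)
{
    hsize_t dims[2]  = {256, 256};
    hsize_t chunk[2] = {64, 64};

    /* Permanent filters require a chunked dataset layout. */
    hid_t space = H5Screate_simple(2, dims, NULL);
    hid_t dcpl  = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl, 2, chunk);

    /* Deflate aggression level 6: a middle ground between 0 (fastest)
     * and 9 (best compression ratio), as described in Filters.html. */
    H5Pset_deflate(dcpl, 6);

    hid_t file = H5Fcreate("filtered.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    hid_t dset = H5Dcreate2(file, "data", H5T_NATIVE_INT, space,
                            H5P_DEFAULT, dcpl, H5P_DEFAULT);

    H5Dclose(dset);
    H5Pclose(dcpl);
    H5Sclose(space);
    H5Fclose(file);
    return 0;
}
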
diff --git a/hl/src/H5DS.c b/hl/src/H5DS.c
index dae20cd..387419b 100644
--- a/hl/src/H5DS.c
+++ b/hl/src/H5DS.c
@@ -1122,7 +1122,7 @@ H5DSdetach_scale(hid_t did, hid_t dsid, unsigned int idx)
goto out;
} /* nelmts */
- /* Free refrences */
+ /* Free references */
if (is_new_ref) {
if (H5Treclaim(tid, sid, H5P_DEFAULT, ndsbuf) < 0)
goto out;
@@ -1633,7 +1633,7 @@ H5DSiterate_scales(hid_t did, unsigned int dim, int *ds_idx, H5DS_iterate_t visi
if (H5DSwith_new_ref(did, &is_new_ref) < 0)
return FAIL;
- /* get the number of scales assotiated with this DIM */
+ /* get the number of scales associated with this DIM */
if ((nscales = H5DSget_num_scales(did, dim)) < 0)
return FAIL;
diff --git a/java/src/hdf/hdf5lib/H5.java b/java/src/hdf/hdf5lib/H5.java
index c8504eb..49a539b 100644
--- a/java/src/hdf/hdf5lib/H5.java
+++ b/java/src/hdf/hdf5lib/H5.java
@@ -161,7 +161,7 @@ import hdf.hdf5lib.structs.H5O_token_t;
* disk (source) and in memory (destination).
* <p>
* For Java, this ``ANY'' is a problem, as the type of data must always be declared. Furthermore, multidimensional
- * arrays are definitely <i>not</i> layed out contiguously in memory. It would be infeasible to declare a separate
+ * arrays are definitely <i>not</i> laid out contiguously in memory. It would be infeasible to declare a separate
* routine for every combination of number type and dimensionality. For that reason, the
* <a href="./hdf.hdf5lib.HDFArray.html"><b>HDFArray</b></a> class is used to discover the type, shape, and size of the
* data array at run time, and to convert to and from a contiguous array of bytes in synchronized static native C order.
diff --git a/java/src/hdf/hdf5lib/package-info.java b/java/src/hdf/hdf5lib/package-info.java
index 3817f97..2863f5a 100644
--- a/java/src/hdf/hdf5lib/package-info.java
+++ b/java/src/hdf/hdf5lib/package-info.java
@@ -114,7 +114,7 @@
* disk (source) and in memory (destination).
* <p>
* For Java, this ``ANY'' is a problem, as the type of data must always be declared. Furthermore, multidimensional
- * arrays are definitely <i>not</i> layed out contiguously in memory. It would be infeasible to declare a separate
+ * arrays are definitely <i>not</i> laid out contiguously in memory. It would be infeasible to declare a separate
* routine for every combination of number type and dimensionality. For that reason, the
* <a href="./hdf.hdf5lib.HDFArray.html"><b>HDFArray</b></a> class is used to discover the type, shape, and size of the
* data array at run time, and to convert to and from a contiguous array of bytes in synchronized static native C order.
diff --git a/m4/aclocal_fc.m4 b/m4/aclocal_fc.m4
index 39ac8a6..9d01f47 100644
--- a/m4/aclocal_fc.m4
+++ b/m4/aclocal_fc.m4
@@ -260,8 +260,8 @@ TEST_SRC="`sed -n '/PROGRAM FC_AVAIL_KINDS/,/END PROGRAM FC_AVAIL_KINDS/p' $srcd
AC_RUN_IFELSE([$TEST_SRC],
[
dnl The output from the above program will be:
- dnl -- LINE 1 -- valid integer kinds (comma seperated list)
- dnl -- LINE 2 -- valid real kinds (comma seperated list)
+ dnl -- LINE 1 -- valid integer kinds (comma separated list)
+ dnl -- LINE 2 -- valid real kinds (comma separated list)
dnl -- LINE 3 -- max decimal precision for reals
dnl -- LINE 4 -- number of valid integer kinds
dnl -- LINE 5 -- number of valid real kinds
diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt
index c46d90c..a828798 100644
--- a/release_docs/RELEASE.txt
+++ b/release_docs/RELEASE.txt
@@ -1118,7 +1118,7 @@ Bug Fixes since HDF5-1.12.0 release
dimension sizes.
The problem was fixed by ensuring that decoding the dimension sizes
- and max values will not go beyong the end of the buffer.
+ and max values will not go beyond the end of the buffer.
(BMR - 2021/05/12, HDFFV-11223)
@@ -1452,7 +1452,7 @@ Bug Fixes since HDF5-1.12.0 release
- h5diff fixed a command line parsing error.
h5diff would ignore the argument to -d (delta) if it is smaller than DBL_EPSILON.
- The macro H5_DBL_ABS_EQUAL was removed and a direct value comparision was used.
+ The macro H5_DBL_ABS_EQUAL was removed and a direct value comparison was used.
(ADB - 2020/07/20, HDFFV-10897)
diff --git a/src/H5FDioc.c b/src/H5FDioc.c
index fdbe41e..5f12a0e 100644
--- a/src/H5FDioc.c
+++ b/src/H5FDioc.c
@@ -201,7 +201,7 @@ H5FD__init_package(void)
#endif /* JRM */
#if 0 /* JRM */
- HDfprintf(stdout, "H5FD_ioc_init() IOC registerd. id = %lld \n", (int64_t)H5FD_IOC_g);
+ HDfprintf(stdout, "H5FD_ioc_init() IOC registered. id = %lld \n", (int64_t)H5FD_IOC_g);
#endif /* JRM */
if (H5I_INVALID_HID == H5FD_IOC_g)
diff --git a/src/H5FDioc.h b/src/H5FDioc.h
index d0fa6cd..f9f32eb 100644
--- a/src/H5FDioc.h
+++ b/src/H5FDioc.h
@@ -64,7 +64,7 @@ typedef struct config_common_t {
uint32_t magic; /* set to H5FD_SUBFILING_FAPL_T_MAGIC */
uint32_t version; /* set to H5FD_CURR_SUBFILING_FAPL_T_VERSION */
int32_t stripe_count; /* How many io concentrators */
- int64_t stripe_depth; /* Max # of bytes in contigious IO to an IOC */
+ int64_t stripe_depth; /* Max # of bytes in contiguous IO to an IOC */
ioc_selection_t ioc_selection; /* Method to select IO Concentrators */
hid_t ioc_fapl_id; /* The hid_t value of the stacked VFD */
int64_t context_id; /* The value used to lookup an IOC context */
diff --git a/src/H5FDioc_threads.c b/src/H5FDioc_threads.c
index 0c3d23c..6aff531 100644
--- a/src/H5FDioc_threads.c
+++ b/src/H5FDioc_threads.c
@@ -89,7 +89,7 @@ ioc_thread_main(void *arg)
* Purpose: The principal entry point to initialize the execution
* context for an IO Concentrator (IOC). The main thread
* is responsible for receiving IO requests from each
- * HDF5 "client" and distibuting those to helper threads
+ * HDF5 "client" and distributing those to helper threads
* for actual processing. We initialize a fixed number
* of helper threads by creating a thread_pool.
*
@@ -292,7 +292,7 @@ translate_opcode(io_op_t op)
*
*-------------------------------------------------------------------------
*/
-#if 0 /* JRM */ /* Original version -- expects sf_work_request_t * as its arguement */
+#if 0 /* JRM */ /* Original version -- expects sf_work_request_t * as its argument */
static HG_THREAD_RETURN_TYPE
handle_work_request(void *arg)
{
@@ -891,7 +891,7 @@ H5FD_ioc__dispatch_elegible_io_q_entries(void)
scan_len = scan_ptr->wk_req.header[0];
/* at present, I/O requests are scalar -- i.e. single blocks specified by offset and length.
- * when this changes, this if statment will have to be updated accordingly.
+ * when this changes, this if statement will have to be updated accordingly.
*/
if (!(((scan_offset + scan_len) < entry_offset) ||
((entry_offset + entry_len) < scan_offset))) {
diff --git a/src/H5FDsubfile_int.c b/src/H5FDsubfile_int.c
index ecf706f..54e5de2 100644
--- a/src/H5FDsubfile_int.c
+++ b/src/H5FDsubfile_int.c
@@ -214,7 +214,7 @@ int sf_open_file_count = 0;
* Function: Public/Client set_verbose_flag
*
* Purpose: For debugging purposes, I allow a verbose setting to
- * have printing of relevent information into an IOC specific
+ * have printing of relevant information into an IOC specific
* file that is opened as a result of enabling the flag
* and closed when the verbose setting is disabled.
*
@@ -349,7 +349,7 @@ record_fid_to_subfile(uint64_t fid, hid_t subfile_context_id, int *next_index)
* inode values, these being constant for all processes
* opening the shared file. The inode value is utilized
* as a key value and is associated with the sf_context
- * which we recieve as one of the input arguments.
+ * which we receive as one of the input arguments.
*
* IO Concentrator threads will be initialized on MPI ranks
* which have been identified via application toplogy
@@ -1468,7 +1468,7 @@ done:
* storage arrays can be stack based rather than explicitly
* allocated and freed.
*
- * The Internal function is resposible for sending all IOC
+ * The Internal function is responsible for sending all IOC
* instances, the (sub)file open requests.
*
* Prior to calling the internal open function, we initialize
@@ -1498,7 +1498,7 @@ H5FD__open_subfiles(void *_config_info, uint64_t h5_file_id, int flags)
char * option_arg = get_ioc_selection_criteria(&ioc_selection);
HDassert(config_info);
- /* Check to see who is calling ths function::
+ /* Check to see who is calling the function::
* We only allow the ioc or subfiling VFDs
*/
if ((config_info->magic != H5FD_IOC_FAPL_T_MAGIC) &&
diff --git a/src/H5FDsubfile_mpi.c b/src/H5FDsubfile_mpi.c
index 4212676..79ed2b6 100644
--- a/src/H5FDsubfile_mpi.c
+++ b/src/H5FDsubfile_mpi.c
@@ -196,7 +196,7 @@ numDigits(int n)
/* ===================================================================== */
/* MPI_Datatype Creation functions.
- * These are catagorized by usage paterns, i.e. when data is sent to or
+ * These are categorized by usage patterns, i.e. when data is sent to or
* received from and IOC, the initial data offset provided by the user
* may or may NOT start on a stripe boundary. Because this, the initial
* data segment to the selected IOC will often be less than 'stripe_size'
@@ -449,7 +449,7 @@ H5FD__create_f_l_mpi_type(subfiling_context_t *context, int ioc_depth, int64_t s
* Concentrator (IOC).
*
* Each data segment is of 'stripe_size' length and will be
- * seperated from a previous or following segment by
+ * separated from a previous or following segment by
* 'sf_blocksize_per_stripe' bytes of data.
*
* Return: The MPI_Datatype that will be used to send or receive data.
@@ -527,7 +527,7 @@ H5FD__create_mpi_uniform_type(subfiling_context_t *context, int ioc_depth, int64
*
* Return: A filled set of vectors. As a consequence of not allowing
* use of MPI derived datatypes in the VFD layer, we need to
- * accomodate the possiblity that large IO transactions will
+ * accommodate the possibility that large IO transactions will
* be required to use multiple IOs per IOC.
*
* Example: Using 4 IOCs, each with 1M stripe-depth; when
@@ -718,7 +718,7 @@ init__indep_io(void *_sf_context, size_t maxdepth, int H5_ATTR_PARALLEL_UNUSED i
*
* We cycle through all 'n_io_conentrators' and send a
* descriptor to each IOC that has a non-zero sized IO
- * request to fullfill.
+ * request to fulfill.
*
* Sending descriptors to an IOC usually gets an ACK or
* NACK in response. For the read operations, we post
@@ -835,7 +835,7 @@ read__independent_async(int n_io_concentrators, hid_t context_id, int64_t offset
* Purpose: We provide a utility function to generate a subfiling
* filename from a template. While the user provides a
* name which will serve as the HDF5 file name, sub-filing
- * files are related to the user filename via the filesytem
+ * files are related to the user filename via the filesystem
* inode identifier. The inode id can be utilized as a
* global unique identifier (GUID) which provides a
* grouping ID to easily distinguish subfiles.
@@ -935,7 +935,7 @@ write_data(io_func_t *this_func)
* until the request is completed as indicated by a non-zero
* flag variable.
*
- * As we go further with the implemention, we anticipate that
+ * As we go further with the implementation, we anticipate that
* rather than testing a single request variable, we will
* deal with a collection of all pending IO requests (on
* this rank).
@@ -999,7 +999,7 @@ async_completion(void *arg)
*
* We cycle through all 'n_io_conentrators' and send a
* descriptor to each IOC that has a non-zero sized IO
- * request to fullfill.
+ * request to fulfill.
*
* Sending descriptors to an IOC usually gets an ACK or
* NACK in response. For the write operations, we post
@@ -1317,7 +1317,7 @@ write__independent_async(int n_io_concentrators, hid_t context_id, int64_t offse
*
* Purpose: This function takes 'count' vector entries
* and initiates an asynch write operation for each.
- * By asychronous, we mean that MPI_Isends are utilized
+ * By asynchronous, we mean that MPI_Isends are utilized
* to communicate the write operations to the 'count'
* IO Concentrators. The calling function will have
* decomposed the actual user IO request into the
@@ -1670,7 +1670,7 @@ ioc_main(int64_t context_id)
#if 1 /* JRM */
if ( ( tag != READ_INDEP ) && ( tag != WRITE_INDEP ) ) {
- HDprintf("\n\nioc_main: recieved non READ_INDEP / WRITE_INDEP mssg. tag = %d.\n\n", tag);
+ HDprintf("\n\nioc_main: received non READ_INDEP / WRITE_INDEP mssg. tag = %d.\n\n", tag);
HDfflush(stdout);
}
#endif /* JRM */
@@ -1802,7 +1802,7 @@ ioc_main(int64_t context_id)
#if 1 /* JRM */
if ((tag != READ_INDEP) && (tag != WRITE_INDEP)) {
- HDprintf("\n\nioc_main: recieved non READ_INDEP / WRITE_INDEP mssg. tag = %d.\n\n", tag);
+ HDprintf("\n\nioc_main: received non READ_INDEP / WRITE_INDEP mssg. tag = %d.\n\n", tag);
HDfflush(stdout);
}
#endif /* JRM */
@@ -1867,7 +1867,7 @@ ioc_main(int64_t context_id)
} /* ioc_main() */
-#endif /* JRM */ /* re-written versin of ioc_main() */
+#endif /* JRM */ /* re-written version of ioc_main() */
/*
=========================================
@@ -2449,7 +2449,7 @@ get__subfile_name(subfiling_context_t *sf_context, int64_t h5_file_id, int subfi
*_subfile_dir = strdup(subfile_dir);
}
else {
- /* Note: Users may specify a directory name which is inaccessable
+ /* Note: Users may specify a directory name which is inaccessible
* from where the current is running. In particular, "node-local"
* storage is not uniformly available to all processes.
* We would like to check if the user pathname unavailable and
@@ -2483,7 +2483,7 @@ get__subfile_name(subfiling_context_t *sf_context, int64_t h5_file_id, int subfi
* and then the second file open using the user supplied open
* flags is invoked. The OPEN_OP provides the user flags as
* part of the RPC message. The file prefix info doesn't
- * transmited as part of the RPC since it is available as
+ * transmitted as part of the RPC since it is available as
* part of the client context which can be utilized by the
* IOC thread. We access the sf_context by reading the
* cache of contexts at the index provided with the RPC msg.
diff --git a/src/H5FDsubfiling.c b/src/H5FDsubfiling.c
index a5077a9..77bfae1 100644
--- a/src/H5FDsubfiling.c
+++ b/src/H5FDsubfiling.c
@@ -103,7 +103,7 @@ typedef struct H5FD_subfiling_t {
int fd; /* the filesystem file descriptor */
H5FD_subfiling_config_t fa; /* driver-specific file access properties */
- /* the following fields are inherrited from the sec2 VFD, and will
+ /* the following fields are inherited from the sec2 VFD, and will
* likely be deleted.
*/
int mpi_rank; /* useful MPI information */
@@ -450,7 +450,7 @@ fapl__get_subfiling_defaults(H5FD_subfiling_config_t *fa)
*
* Purpose: Modify the file access property list to use the
* H5FD_SUBFILING driver defined in this source file. All
- * driver specfic properties are passed in as a pointer to
+ * driver specific properties are passed in as a pointer to
* a suitably initialized instance of H5FD_subfiling_config_t
*
* Return: SUCCEED/FAIL
@@ -522,16 +522,16 @@ done:
* Function: H5FD_subfiling_validate_config()
*
* Purpose: Test to see if the supplied instance of
- * H5FD_subfiling_config_t contains internally consistant data.
+ * H5FD_subfiling_config_t contains internally consistent data.
* Return SUCCEED if so, and FAIL otherwise.
*
- * Note the difference between internally consistant and
+ * Note the difference between internally consistent and
* correct. As we will have to try to setup subfiling to
* determine whether the supplied data is correct,
- * we will settle for internal consistancy at this point
+ * we will settle for internal consistency at this point
*
* Return: SUCCEED if instance of H5FD_subfiling_config_t contains
- * internally consistant data, FAIL otherwise.
+ * internally consistent data, FAIL otherwise.
*
* Programmer: Jacob Smith
* 9/10/17
@@ -839,7 +839,7 @@ H5FD__subfiling_open(const char *name, unsigned flags, hid_t subfiling_fapl_id,
H5FD_sec2_t *hdf_file = (H5FD_sec2_t *)file_ptr->sf_file;
h5_stat_t sb;
/* We create a new file descriptor for our file structure.
- * Basically, we want these seperate so that sec2 can
+ * Basically, we want these separate so that sec2 can
* deal with the opened file for additional operations
* (especially close) without interfering with subfiling.
*/
@@ -1169,7 +1169,7 @@ H5FD__subfiling_set_eoa(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, haddr_t a
* and a file offset) of any storage operation.
*
* Having a defined storage layout, the virtual file EOF
- * calculation shoud be the MAXIMUM value returned by the
+ * calculation should be the MAXIMUM value returned by the
* collection of IOCs. Every MPI rank which hosts an IOC
* maintains it's own EOF by updating that value for each
* WRITE operation that completes, i.e. if a new local EOF
@@ -1182,7 +1182,7 @@ H5FD__subfiling_set_eoa(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, haddr_t a
* we also determine the 'sf_blocksize_per_stripe' which
* is simply the 'sf_stripe_size' * 'n_ioc_concentrators'
*
- * 2. For every write operation, the IOC recieves a message
+ * 2. For every write operation, the IOC receives a message
* containing a file_offset and the data_size.
* 3. The file_offset + data_size are in turn used to
* create a stripe_id:
@@ -1844,7 +1844,7 @@ H5FD__subfiling_read_vector(H5FD_t *_file, hid_t dxpl_id, uint32_t count, H5FD_m
HDassert((count == 0) || (types[0] != H5FD_MEM_NOLIST));
/* Note that the following code does not let the sub-filing VFD participate
- * in collective calls when thre is no data to write. This is not an issue
+ * in collective calls when there is no data to write. This is not an issue
* now, as we don't do anything special with collective operations. However
* this needs to be fixed.
*/
@@ -1992,7 +1992,7 @@ H5FD__subfiling_write_vector(H5FD_t *_file, hid_t dxpl_id, uint32_t count, H5FD_
HDassert((count == 0) || (types[0] != H5FD_MEM_NOLIST));
/* Note that the following code does not let the sub-filing VFD participate
- * in collective calls when thre is no data to write. This is not an issue
+ * in collective calls when there is no data to write. This is not an issue
* now, as we don't do anything special with collective operations. However
* this needs to be fixed.
*/
@@ -2111,7 +2111,7 @@ H5FD__subfiling_lock(H5FD_t *_file, hbool_t rw)
HDassert(file);
if (file->fa.require_ioc)
- puts("Subfiling driver doesn't suport file locking");
+ puts("Subfiling driver doesn't support file locking");
else {
if (H5FD_lock(file->sf_file, rw) < 0)
HSYS_GOTO_ERROR(H5E_FILE, H5E_BADFILE, FAIL, "unable to lock file")
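
The comments patched in this file describe the subfiling VFD's RAID-0 style layout: data is split into stripes of 'sf_stripe_size' bytes dealt out round-robin across the IO concentrators, and 'sf_blocksize_per_stripe' is the stripe size times the IOC count. The sketch below is only an illustration of that decomposition under those assumptions, not code from the driver; map_offset, stripe_size, and n_iocs are made-up names.

#include <stdint.h>
#include <stdio.h>

/* Map a logical file offset onto (IOC index, offset within that IOC's
 * subfile) for a simple RAID-0 layout: stripes of 'stripe_size' bytes
 * are dealt out round-robin across 'n_iocs' IO concentrators. */
static void
map_offset(int64_t offset, int64_t stripe_size, int n_iocs,
           int *ioc_out, int64_t *subfile_offset_out)
{
    int64_t stripe_id = offset / stripe_size;  /* which stripe holds the byte   */
    int64_t row       = stripe_id / n_iocs;    /* complete rounds already dealt */

    *ioc_out            = (int)(stripe_id % n_iocs);
    *subfile_offset_out = row * stripe_size + offset % stripe_size;
}

int
main(void)
{
    int     ioc;
    int64_t sub_off;

    /* 4 IOCs with a 1 MiB stripe depth, echoing the example in the comments. */
    map_offset((int64_t)5 * 1024 * 1024 + 17, 1024 * 1024, 4, &ioc, &sub_off);
    printf("ioc=%d subfile_offset=%lld\n", ioc, (long long)sub_off);
    return 0;
}
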
diff --git a/src/H5FDsubfiling.h b/src/H5FDsubfiling.h
index 59a78ce..458f82f 100644
--- a/src/H5FDsubfiling.h
+++ b/src/H5FDsubfiling.h
@@ -72,7 +72,7 @@
* `stripe_count` (int32_t)
*
* The integer value which identifies the total number of
- * subfiles that have been algorthmically been selected to
+ * subfiles that have been algorithmically been selected to
* to contain the segments of raw data which make up an HDF5
* file. This value is used to implement the RAID-0 functionality
* when reading or writing datasets.
@@ -89,7 +89,7 @@
*
* The io_selection_t defines a specific algorithm by which IO
* concentrators (IOCs) and sub-files are identified. The available
- * algorthms are: SELECT_IOC_ONE_PER_NODE, SELECT_IOC_EVERY_NTH_RANK,
+ * algorithms are: SELECT_IOC_ONE_PER_NODE, SELECT_IOC_EVERY_NTH_RANK,
* SELECT_IOC_WITH_CONFIG, and SELECT_IOC_TOTAL.
*
*** STACKING and other VFD support
@@ -101,12 +101,12 @@
* A valid file access property list (fapl) is cached on each
* process and thus enables selection of an alternative provider
* for subsequent file operations.
- * By defalt, Sub-filing employs an additional support VFD that
+ * By default, Sub-filing employs an additional support VFD that
* provides file IO proxy capabilities to all MPI ranks in a
* distributed parallel application. This IO indirection
* thus allows application access all sub-files even while
* these may actually be node-local and thus not directly
- * accessable to remote ranks.
+ * accessible to remote ranks.
*
*** Subfiling file Info
*
@@ -149,7 +149,7 @@ typedef struct config_common_t {
* Require_IOC is a boolean flag with a default value of TRUE.
* This flag indicates that the stacked H5FDioc VFD should be
* employed for sub-filing operations. The default flag can be
- * overriden with an environment variable: H5_REQUIRE_IOC=0
+ * overridden with an environment variable: H5_REQUIRE_IOC=0
*
*/
@@ -194,7 +194,7 @@ H5_DLL herr_t H5Pget_fapl_subfiling(hid_t fapl_id, H5FD_subfiling_config_t *conf
* \brief Modifies the file access property list to use the #H5FD_SUBFILING driver
*
* \fapl_id
- * \param[in] vfd_config #H5FD_SUBFILING driver specfic properties. If NULL, then
+ * \param[in] vfd_config #H5FD_SUBFILING driver specific properties. If NULL, then
* the IO concentrator VFD will be used.
* \returns \herr_t
*
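
The doxygen comment above documents H5Pset_fapl_subfiling() and notes that a NULL vfd_config falls back to defaults with the stacked IO concentrator VFD. A rough usage sketch follows; because the subfiling VFD is experimental at this point, the header name, the H5Pset_mpi_params() call, and the default behavior are assumptions and may differ from this development snapshot.

#include <mpi.h>
#include "hdf5.h"
#include "H5FDsubfiling.h"   /* assumed public header for the experimental VFD */

int
main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

    /* Hand the driver an MPI communicator; MPI_COMM_WORLD is a common default. */
    H5Pset_mpi_params(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);

    /* NULL configuration: per the comment above, driver defaults are used and
     * the IO concentrator VFD is stacked underneath. */
    H5Pset_fapl_subfiling(fapl, NULL);

    hid_t file = H5Fcreate("subfiled.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

    H5Fclose(file);
    H5Pclose(fapl);
    MPI_Finalize();
    return 0;
}
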
diff --git a/src/H5FDsubfiling_priv.h b/src/H5FDsubfiling_priv.h
index 4ca63d0..37101f0 100644
--- a/src/H5FDsubfiling_priv.h
+++ b/src/H5FDsubfiling_priv.h
@@ -84,7 +84,7 @@ extern "C" {
* `stripe_count` (int32_t)
*
* The integer value which identifies the total number of
- * subfiles that have been algorthmically been selected to
+ * subfiles that have been algorithmically been selected to
* to contain the segments of raw data which make up an HDF5
* file. This value is used to implement the RAID-0 functionality
* when reading or writing datasets.
@@ -101,7 +101,7 @@ extern "C" {
*
* The io_selection_t defines a specific algorithm by which IO
* concentrators (IOCs) and sub-files are identified. The available
- * algorthms are: SELECT_IOC_ONE_PER_NODE, SELECT_IOC_EVERY_NTH_RANK,
+ * algorithms are: SELECT_IOC_ONE_PER_NODE, SELECT_IOC_EVERY_NTH_RANK,
* SELECT_IOC_WITH_CONFIG, and SELECT_IOC_TOTAL.
*
*** STACKING and other VFD support
@@ -113,12 +113,12 @@ extern "C" {
* A valid file access property list (fapl) is cached on each
* process and thus enables selection of an alternative provider
* for subsequent file operations.
- * By defalt, Sub-filing employs an additional support VFD that
+ * By default, Sub-filing employs an additional support VFD that
* provides file IO proxy capabilities to all MPI ranks in a
* distributed parallel application. This IO indirection
* thus allows application access all sub-files even while
* these may actually be node-local and thus not directly
- * accessable to remote ranks.
+ * accessible to remote ranks.
*
*** Subfiling file Info
*
@@ -154,7 +154,7 @@ extern "C" {
typedef struct stat_record {
int64_t op_count; /* How many ops in total */
- double min; /* minium (time) */
+ double min; /* minimum (time) */
double max; /* maximum (time) */
double total; /* average (time) */
} stat_record_t;
@@ -291,7 +291,7 @@ typedef struct app_layout_t {
long hostid; /* value returned by gethostid() */
layout_t *layout; /* Vector of {rank,hostid} values */
int * node_ranks; /* ranks extracted from sorted layout */
- int node_count; /* Total nodes (differnt hostids) */
+ int node_count; /* Total nodes (different hostids) */
int node_index; /* My node: index into node_ranks */
int local_peers; /* How may local peers on my node */
int world_rank; /* My MPI rank */
@@ -399,7 +399,7 @@ extern atomic_int sf_io_ops_pending;
* At least initially, all sanity checking is done with asserts, as the
* the existing I/O concentrator code is not well integrated into the HDF5
* error reporting system. This will have to be revisited for a production
- * version, but it should be suficient for now.
+ * version, but it should be sufficient for now.
*
* JRM -- 11/2/21
*
@@ -490,11 +490,11 @@ do {
* the IOC I/O Queue. This field points to the previous entry
* in the queue, or NULL if there is no previous entry.
*
- * in_progress: Boolean flag that must be FALSE when the entry is insterted
+ * in_progress: Boolean flag that must be FALSE when the entry is inserted
* into the IOC I/O Queue, and set to TRUE when the entry is dispatched
* to the worker thread pool for execution.
*
- * When in_progress is FALS, the enty is said to be pending.
+ * When in_progress is FALS, the entry is said to be pending.
*
* counter: uint32_t containing a serial number assigned to this IOC
* I/O Queue entry. Note that this will roll over on long
@@ -544,7 +544,7 @@ typedef struct H5FD_ioc_io_queue_entry {
hbool_t in_progress;
uint32_t counter;
- /* rework these fileds */ /* JRM */
+ /* rework these fields */ /* JRM */
sf_work_request_t wk_req;
struct hg_thread_work thread_wk;
@@ -589,7 +589,7 @@ struct hg_thread_work {
* structure H5FD_ioc_io_queue
*
* This is a temporary structure -- its fields should be moved to an I/O
- * concentrator Catchall structure eventualy.
+ * concentrator Catchall structure eventually.
*
* The fields of this structure support the io queue used to receive and
* sequence I/O requests for execution by the worker threads. The rules
@@ -598,20 +598,20 @@ struct hg_thread_work {
* 1) Non-overlaping I/O requests must be fed to the worker threads in
* the order received, and may execute concurrently
*
- * 2) Overlaping read requests must be fed to the worker threads in
+ * 2) Overlapping read requests must be fed to the worker threads in
* the order received, but may execute concurrently.
*
* 3) If any pair of I/O requests overlap, and at least one is a write
* request, they must be executed in strict arrival order, and the
* first must complete before the second starts.
*
- * Due to the strict ordering requirment in rule 3, entries must be
+ * Due to the strict ordering requirement in rule 3, entries must be
* inserted at the tail of the queue in receipt order, and retained on
* the queue until completed. Entries in the queue are marked pending
* when inserted on the queue, in progress when handed to a worker
* thread, and deleted from the queue when completed.
*
- * The dispatch algorith is as follows:
+ * The dispatch algorithm is as follows:
*
* 1) Set X equal to the element at the head of the queue.
*
@@ -641,7 +641,7 @@ struct hg_thread_work {
* I/O requests from the queue, check to see if there are any pending
* requests, and trigger the dispatch algorithm if there are.
*
- * The fileds in the structure are discussed individually below.
+ * The fields in the structure are discussed individually below.
*
* magic: Unsigned 32 bit integer always set to H5FD_IOC__IO_Q_MAGIC.
* This field is used to validate pointers to instances of
@@ -673,7 +673,7 @@ struct hg_thread_work {
* to wrap around once its maximum value is reached.
*
* q_mutex: Mutex used to ensure that only one thread accesses the IOC I/O
- * Queue at once. This mutex must be held to access of modifiy
+ * Queue at once. This mutex must be held to access of modify
* all fields of the
*
*
@@ -687,10 +687,10 @@ struct hg_thread_work {
* max_q_len: Maximum number of requests residing on the IOC I/O Queue at
* any point in time in the current run.
*
- * max_num_pending: Maximum number of pending rquests residing on the IOC
+ * max_num_pending: Maximum number of pending requests residing on the IOC
* I/O Queue at any point in time in the current run.
*
- * max_num_in_progress: Maximum number of in progress rquests residing on
+ * max_num_in_progress: Maximum number of in progress requests residing on
* the IOC I/O Queue at any point in time in the current run.
*
* ind_read_requests: Number of independent read requests received by the
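
The queue rules and dispatch algorithm documented above all hinge on deciding whether two scalar requests overlap; the check patched in the H5FDioc_threads.c hunk earlier in this diff spells that test out. A standalone restatement of the same interval predicate (offsets and lengths only, no queue bookkeeping) looks like this:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Two scalar requests, each an (offset, length) pair, are treated as
 * overlapping unless one ends strictly before the other begins; this
 * mirrors the predicate shown in the H5FDioc_threads.c hunk above. */
static bool
requests_overlap(int64_t offset_a, int64_t len_a, int64_t offset_b, int64_t len_b)
{
    return !(((offset_a + len_a) < offset_b) || ((offset_b + len_b) < offset_a));
}

int
main(void)
{
    assert(requests_overlap(0, 100, 50, 10));    /* nested ranges: overlap      */
    assert(!requests_overlap(0, 100, 200, 10));  /* disjoint ranges: no overlap */
    return 0;
}
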
diff --git a/src/H5HF.c b/src/H5HF.c
index 8e69032..7904af9 100644
--- a/src/H5HF.c
+++ b/src/H5HF.c
@@ -355,7 +355,7 @@ H5HF_insert(H5HF_t *fh, size_t size, const void *obj, void *id /*out*/)
if (size > hdr->max_man_size) {
/* Store 'huge' object in heap
*
- * Although not ideal, we can quiet the const warning here becuase no
+ * Although not ideal, we can quiet the const warning here because no
* obj pointer that was originally const should ever arrive here.
*/
H5_GCC_CLANG_DIAG_OFF("cast-qual")
diff --git a/src/H5SL.c b/src/H5SL.c
index beb00c2..e12e21a 100644
--- a/src/H5SL.c
+++ b/src/H5SL.c
@@ -311,7 +311,7 @@
{ \
H5SL_node_t *_last = X; /* Lowest node in the current gap */ \
H5SL_node_t *_llast = X; /* Lowest node in the previous gap */ \
- H5SL_node_t *_next = NULL; /* Highest node in the currect gap */ \
+ H5SL_node_t *_next = NULL; /* Highest node in the current gap */ \
H5SL_node_t *_drop = NULL; /* Low node of the gap to drop into */ \
H5SL_node_t *_ldrop = NULL; /* Low node of gap before the one to drop into */ \
H5SL_node_t *_head = SLIST->header; /* Head of the skip list */ \
diff --git a/src/mercury/include/na.h b/src/mercury/include/na.h
index 88b97f8..6f75b28 100644
--- a/src/mercury/include/na.h
+++ b/src/mercury/include/na.h
@@ -405,7 +405,7 @@ NA_PUBLIC na_return_t NA_Msg_init_unexpected(na_class_t *na_class, void *buf, na
* receive to be posted at the destination before sending the message and the
* destination is allowed to drop the message without notification. However,
* in general, NA plugins are encouraged to remain reliable to avoid unnecessary
- * timeouts and cancelations.
+ * timeouts and cancellations.
*
* Users must manually create an operation ID through NA_Op_create() and pass
* it through op_id for future use and prevent multiple ID creation.
diff --git a/src/mercury/include/na_sm.h b/src/mercury/include/na_sm.h
index 709c639..3b1cd8d 100644
--- a/src/mercury/include/na_sm.h
+++ b/src/mercury/include/na_sm.h
@@ -43,7 +43,7 @@ extern "C" {
#endif
/**
- * Get the curent host ID (generate a new one if none exists).
+ * Get the current host ID (generate a new one if none exists).
*
* \param id [IN/OUT] pointer to SM host ID
*
diff --git a/test/cork.c b/test/cork.c
index 543e890..c22308b 100644
--- a/test/cork.c
+++ b/test/cork.c
@@ -1887,7 +1887,7 @@ test_dset_cork(hbool_t swmr, hbool_t new_format)
{
hid_t fid = H5I_INVALID_HID; /* File ID */
hid_t fapl; /* File access property list */
- hid_t gid = H5I_INVALID_HID; /* Groupd ID */
+ hid_t gid = H5I_INVALID_HID; /* Group ID */
hid_t did1 = H5I_INVALID_HID, did2 = H5I_INVALID_HID; /* Dataset IDs */
hid_t tid1 = H5I_INVALID_HID, tid2 = H5I_INVALID_HID; /* Datatype IDs */
hid_t sid = H5I_INVALID_HID; /* Dataspace ID */
diff --git a/testpar/t_subfiling_vfd.c b/testpar/t_subfiling_vfd.c
index 501b8b8..a026d50 100644
--- a/testpar/t_subfiling_vfd.c
+++ b/testpar/t_subfiling_vfd.c
@@ -160,7 +160,7 @@ _get_subfiling_extension_info()
* Function: _populate_filepath
*
* Purpose: Given a directory name and a base name, concatenate the two and
- * run h5fixname() to get the "actual" path to the intented target.
+ * run h5fixname() to get the "actual" path to the intended target.
* `h5suffix' should be FALSE to keep the base name unaltered;
* TRUE will append the '.h5' h5suffix to the basename...
* FALSE -> h5fixname_no_suffix(), TRUE -> h5fixname()
@@ -1308,7 +1308,7 @@ create_subfiling_ioc_fapl(const char *_basename, struct subfilingtest_filenames
TEST_ERROR;
}
- /* Now we can set the SUBFILING fapl befor returning. */
+ /* Now we can set the SUBFILING fapl before returning. */
if (H5Pset_fapl_subfiling(ret_value, &subfiling_conf) == FAIL) {
TEST_ERROR;
}
diff --git a/testpar/t_vfd.c b/testpar/t_vfd.c
index 26a4423..c54ea2f 100644
--- a/testpar/t_vfd.c
+++ b/testpar/t_vfd.c
@@ -357,7 +357,7 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
failure_mssg = "Can't create ioc fapl.";
}
-#if 1 /* JRM */ /* this is temporary -- rework for programatic control later */
+#if 1 /* JRM */ /* this is temporary -- rework for programmatic control later */
memset(&ioc_config, 0, sizeof(ioc_config));
memset(&subfiling_conf, 0, sizeof(subfiling_conf));
@@ -407,7 +407,7 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
#endif /* JRM */
}
- /* Now we can set the SUBFILING fapl befor returning. */
+ /* Now we can set the SUBFILING fapl before returning. */
if ((pass) && (H5Pset_fapl_subfiling(fapl_id, &subfiling_conf) == FAIL)) {
pass = FALSE;
diff --git a/utils/tools/h5dwalk/h5dwalk.1 b/utils/tools/h5dwalk/h5dwalk.1
index 60e1080..eb0e5e8 100644
--- a/utils/tools/h5dwalk/h5dwalk.1
+++ b/utils/tools/h5dwalk/h5dwalk.1
@@ -25,7 +25,7 @@ Captures the hdf5 tool output into a named file.
.B \-l
or
.B \-\-log [file]
-Captures hdf5 tool output into a individual log files. If an optional file (directory) is specified, then output from all tool instances will be written in the given file directory. Without the optional filename, each tool instance ouput will be captured in a new log file whose name is associated with the hdf5 tool that was run and is written in the current working directory.
+Captures hdf5 tool output into a individual log files. If an optional file (directory) is specified, then output from all tool instances will be written in the given file directory. Without the optional filename, each tool instance output will be captured in a new log file whose name is associated with the hdf5 tool that was run and is written in the current working directory.
.TP
.B \-E
or
diff --git a/utils/tools/h5dwalk/h5dwalk.c b/utils/tools/h5dwalk/h5dwalk.c
index 1f42aed..a1e66e0 100644
--- a/utils/tools/h5dwalk/h5dwalk.c
+++ b/utils/tools/h5dwalk/h5dwalk.c
@@ -53,7 +53,7 @@ uint64_t total_bytes = 0;
/* global flags which indicate whether we need
* to capture tool outputs into a file...
* Related to this is whether the stderr should
- * be logged seperately.
+ * be logged separately.
*/
#define BUFT_SIZE 131072
/* FIXME: 'buft_max' should probably be configurable.. */
@@ -1469,7 +1469,7 @@ main(int argc, const char *argv[])
/* As we move forward, we might allow the HDF5 tool to be */
/* queried for an acceptable set set of runtime arguments. */
/* This could be just a simple string to allow getopt_long */
- /* to be invoked on the remaing command line arguments. */
+ /* to be invoked on the remaining command line arguments. */
/**************************************************************/
int *path_indices = NULL;
@@ -1635,7 +1635,7 @@ main(int argc, const char *argv[])
if (outputname != NULL) {
if (!text) {
if (rank == 0) {
- puts("ouput capture needs to be a text formated file");
+ puts("output capture needs to be a text formatted file");
}
}
else {