diff options
author | Scot Breitenfeld <brtnfld@hdfgroup.org> | 2022-01-19 15:41:56 (GMT) |
---|---|---|
committer | GitHub <noreply@github.com> | 2022-01-19 15:41:56 (GMT) |
commit | 5875cb94648b5b0b7a0923306b3084d373f1735f (patch) | |
tree | 58562552e1de458944465c7d990e6c655a8fee17 /src/H5FDsubfile_mpi.c | |
parent | e72e64b87b77bb1ba32642528f19db1217869547 (diff) | |
download | hdf5-inactive/selection_io_with_subfiling_vfd.zip hdf5-inactive/selection_io_with_subfiling_vfd.tar.gz hdf5-inactive/selection_io_with_subfiling_vfd.tar.bz2 |
spelling corrections (#1382) inactive/selection_io_with_subfiling_vfd
* spelling correction
Diffstat (limited to 'src/H5FDsubfile_mpi.c')
-rw-r--r-- | src/H5FDsubfile_mpi.c | 26 |
1 file changed, 13 insertions, 13 deletions
diff --git a/src/H5FDsubfile_mpi.c b/src/H5FDsubfile_mpi.c index 4212676..79ed2b6 100644 --- a/src/H5FDsubfile_mpi.c +++ b/src/H5FDsubfile_mpi.c @@ -196,7 +196,7 @@ numDigits(int n) /* ===================================================================== */ /* MPI_Datatype Creation functions. - * These are catagorized by usage paterns, i.e. when data is sent to or + * These are categorized by usage patterns, i.e. when data is sent to or * received from and IOC, the initial data offset provided by the user * may or may NOT start on a stripe boundary. Because this, the initial * data segment to the selected IOC will often be less than 'stripe_size' @@ -449,7 +449,7 @@ H5FD__create_f_l_mpi_type(subfiling_context_t *context, int ioc_depth, int64_t s * Concentrator (IOC). * * Each data segment is of 'stripe_size' length and will be - * seperated from a previous or following segment by + * separated from a previous or following segment by * 'sf_blocksize_per_stripe' bytes of data. * * Return: The MPI_Datatype that will be used to send or receive data. @@ -527,7 +527,7 @@ H5FD__create_mpi_uniform_type(subfiling_context_t *context, int ioc_depth, int64 * * Return: A filled set of vectors. As a consequence of not allowing * use of MPI derived datatypes in the VFD layer, we need to - * accomodate the possiblity that large IO transactions will + * accommodate the possibility that large IO transactions will * be required to use multiple IOs per IOC. * * Example: Using 4 IOCs, each with 1M stripe-depth; when @@ -718,7 +718,7 @@ init__indep_io(void *_sf_context, size_t maxdepth, int H5_ATTR_PARALLEL_UNUSED i * * We cycle through all 'n_io_conentrators' and send a * descriptor to each IOC that has a non-zero sized IO - * request to fullfill. + * request to fulfill. * * Sending descriptors to an IOC usually gets an ACK or * NACK in response. 
For the read operations, we post @@ -835,7 +835,7 @@ read__independent_async(int n_io_concentrators, hid_t context_id, int64_t offset * Purpose: We provide a utility function to generate a subfiling * filename from a template. While the user provides a * name which will serve as the HDF5 file name, sub-filing - * files are related to the user filename via the filesytem + * files are related to the user filename via the filesystem * inode identifier. The inode id can be utilized as a * global unique identifier (GUID) which provides a * grouping ID to easily distinguish subfiles. @@ -935,7 +935,7 @@ write_data(io_func_t *this_func) * until the request is completed as indicated by a non-zero * flag variable. * - * As we go further with the implemention, we anticipate that + * As we go further with the implementation, we anticipate that * rather than testing a single request variable, we will * deal with a collection of all pending IO requests (on * this rank). @@ -999,7 +999,7 @@ async_completion(void *arg) * * We cycle through all 'n_io_conentrators' and send a * descriptor to each IOC that has a non-zero sized IO - * request to fullfill. + * request to fulfill. * * Sending descriptors to an IOC usually gets an ACK or * NACK in response. For the write operations, we post @@ -1317,7 +1317,7 @@ write__independent_async(int n_io_concentrators, hid_t context_id, int64_t offse * * Purpose: This function takes 'count' vector entries * and initiates an asynch write operation for each. - * By asychronous, we mean that MPI_Isends are utilized + * By asynchronous, we mean that MPI_Isends are utilized * to communicate the write operations to the 'count' * IO Concentrators. The calling function will have * decomposed the actual user IO request into the @@ -1670,7 +1670,7 @@ ioc_main(int64_t context_id) #if 1 /* JRM */ if ( ( tag != READ_INDEP ) && ( tag != WRITE_INDEP ) ) { - HDprintf("\n\nioc_main: recieved non READ_INDEP / WRITE_INDEP mssg. 
tag = %d.\n\n", tag); + HDprintf("\n\nioc_main: received non READ_INDEP / WRITE_INDEP mssg. tag = %d.\n\n", tag); HDfflush(stdout); } #endif /* JRM */ @@ -1802,7 +1802,7 @@ ioc_main(int64_t context_id) #if 1 /* JRM */ if ((tag != READ_INDEP) && (tag != WRITE_INDEP)) { - HDprintf("\n\nioc_main: recieved non READ_INDEP / WRITE_INDEP mssg. tag = %d.\n\n", tag); + HDprintf("\n\nioc_main: received non READ_INDEP / WRITE_INDEP mssg. tag = %d.\n\n", tag); HDfflush(stdout); } #endif /* JRM */ @@ -1867,7 +1867,7 @@ ioc_main(int64_t context_id) } /* ioc_main() */ -#endif /* JRM */ /* re-written versin of ioc_main() */ +#endif /* JRM */ /* re-written version of ioc_main() */ /* ========================================= @@ -2449,7 +2449,7 @@ get__subfile_name(subfiling_context_t *sf_context, int64_t h5_file_id, int subfi *_subfile_dir = strdup(subfile_dir); } else { - /* Note: Users may specify a directory name which is inaccessable + /* Note: Users may specify a directory name which is inaccessible * from where the current is running. In particular, "node-local" * storage is not uniformly available to all processes. * We would like to check if the user pathname unavailable and @@ -2483,7 +2483,7 @@ get__subfile_name(subfiling_context_t *sf_context, int64_t h5_file_id, int subfi * and then the second file open using the user supplied open * flags is invoked. The OPEN_OP provides the user flags as * part of the RPC message. The file prefix info doesn't - * transmited as part of the RPC since it is available as + * transmitted as part of the RPC since it is available as * part of the client context which can be utilized by the * IOC thread. We access the sf_context by reading the * cache of contexts at the index provided with the RPC msg. |