author     Allen Byrne <50328838+byrnHDF@users.noreply.github.com>    2023-02-16 13:23:09 (GMT)
committer  GitHub <noreply@github.com>    2023-02-16 13:23:09 (GMT)
commit     af9659d8475123d25f2235ef09b39567c4e2dde0 (patch)
tree       b94ef18a88c9c0a18f1cd027cf19f7d1487440da /test
parent     5b79bb2f0f0c9722da2236deef2875291bdb7bde (diff)
1.10: Add spelling check to CI and fix errors. (#2442)
* Update CI and support files to match latest from 1.14
* Correct spelling
* More spelling corrections
* spelling fixes in testpar
* Fix spelling errors in tools
* More tools spelling fixes
* Spelling fixes for rest of tools and some src
* Fix spelling errors in src files, pt 2
* Fix spelling in src pt3
* Fix spelling errors pt4
* Fix spelling errors pt5
* Spelling fix pt6
* fix spelling error examples
* fix spelling in tests
* fix spelling errors in test pt2
* Fix spelling errors in test pt3
* fix spelling in test pt4
* Fix spelling errors in hl
* fix spelling errors in c++
* Spelling fixes for fortran
* spelling fixes for bin and java
* Add relative path
* Change spelling action to use a file for list of ignore words
* Fix spelling ignore list
* remove unused file
Diffstat (limited to 'test')
-rw-r--r--  test/AtomicWriterReader.txt | 2
-rw-r--r--  test/SWMR_POSIX_Order_UG.txt | 4
-rw-r--r--  test/SWMR_UseCase_UG.txt | 6
-rw-r--r--  test/accum.c | 5
-rw-r--r--  test/accum_swmr_reader.c | 1
-rw-r--r--  test/atomic_writer.c | 2
-rw-r--r--  test/big.c | 10
-rw-r--r--  test/btree2.c | 130
-rw-r--r--  test/cache.c | 68
-rw-r--r--  test/cache_common.c | 16
-rw-r--r--  test/cache_common.h | 4
-rw-r--r--  test/cache_image.c | 12
-rw-r--r--  test/cache_tagging.c | 46
-rw-r--r--  test/chunk_info.c | 12
-rw-r--r--  test/cmpd_dset.c | 2
-rw-r--r--  test/cork.c | 6
-rw-r--r--  test/direct_chunk.c | 14
-rw-r--r--  test/dsets.c | 61
-rw-r--r--  test/dt_arith.c | 26
-rw-r--r--  test/dtypes.c | 12
-rw-r--r--  test/earray.c | 18
-rw-r--r--  test/efc.c | 1
-rw-r--r--  test/enc_dec_plist_cross_platform.c | 2
-rw-r--r--  test/enum.c | 12
-rw-r--r--  test/external.c | 6
-rw-r--r--  test/farray.c | 12
-rw-r--r--  test/fheap.c | 8
-rw-r--r--  test/file_image.c | 14
-rw-r--r--  test/fillval.c | 4
-rw-r--r--  test/filter_fail.c | 4
-rw-r--r--  test/filter_plugin.c | 2
-rw-r--r--  test/filter_plugin1_dsets.c | 4
-rw-r--r--  test/filter_plugin2_dsets.c | 2
-rw-r--r--  test/filter_plugin3_dsets.c | 4
-rw-r--r--  test/filter_plugin4_groups.c | 2
-rw-r--r--  test/flushrefresh.c | 12
-rw-r--r--  test/gen_new_array.c | 4
-rw-r--r--  test/gen_old_group.c | 2
-rw-r--r--  test/gen_plist.c | 2
-rw-r--r--  test/genall5.c | 2
-rw-r--r--  test/getname.c | 2
-rw-r--r--  test/gheap.c | 2
-rw-r--r--  test/h5test.c | 4
-rw-r--r--  test/hdfs.c | 2
-rw-r--r--  test/links.c | 8
-rw-r--r--  test/mf.c | 44
-rw-r--r--  test/mirror_vfd.c | 2
-rw-r--r--  test/mount.c | 4
-rw-r--r--  test/mtime.c | 2
-rw-r--r--  test/objcopy.c | 16
-rw-r--r--  test/ohdr.c | 6
-rw-r--r--  test/page_buffer.c | 26
-rw-r--r--  test/reserved.c | 5
-rw-r--r--  test/ros3.c | 6
-rw-r--r--  test/s3comms.c | 30
-rw-r--r--  test/set_extent.c | 14
-rw-r--r--  test/stab.c | 6
-rw-r--r--  test/swmr.c | 10
-rw-r--r--  test/swmr_common.c | 2
-rw-r--r--  test/swmr_generator.c | 2
-rw-r--r--  test/swmr_reader.c | 2
-rw-r--r--  test/swmr_sparse_writer.c | 2
-rw-r--r--  test/swmr_start_write.c | 2
-rw-r--r--  test/tattr.c | 12
-rw-r--r--  test/tcoords.c | 2
-rw-r--r--  test/testmeta.c | 2
-rw-r--r--  test/tfile.c | 44
-rw-r--r--  test/tgenprop.c | 18
-rw-r--r--  test/th5o.c | 2
-rw-r--r--  test/th5s.c | 2
-rw-r--r--  test/tid.c | 4
-rw-r--r--  test/timer.c | 10
-rw-r--r--  test/tmisc.c | 4
-rw-r--r--  test/trefer.c | 4
-rw-r--r--  test/tselect.c | 44
-rw-r--r--  test/tsohm.c | 10
-rw-r--r--  test/ttsafe_attr_vlen.c | 2
-rw-r--r--  test/ttsafe_error.c | 2
-rw-r--r--  test/tvltypes.c | 2
-rw-r--r--  test/twriteorder.c | 4
-rw-r--r--  test/unlink.c | 6
-rw-r--r--  test/unregister.c | 2
-rw-r--r--  test/use_common.c | 4
-rw-r--r--  test/use_disable_mdc_flushes.c | 2
-rw-r--r--  test/vds.c | 16
-rw-r--r--  test/vds_env.c | 2
86 files changed, 476 insertions, 479 deletions
diff --git a/test/AtomicWriterReader.txt b/test/AtomicWriterReader.txt
index dc0a3bd..064ba39 100644
--- a/test/AtomicWriterReader.txt
+++ b/test/AtomicWriterReader.txt
@@ -11,7 +11,7 @@ atomic_reader.c: is the "read" part of the test.
Building the Tests
------------------
-The two test parts are automically built during configure and make process.
+The two test parts are automatically built during configure and make process.
But to build them individually, you can do in test/ directory:
$ gcc atomic_writer
$ gcc atomic_reader
diff --git a/test/SWMR_POSIX_Order_UG.txt b/test/SWMR_POSIX_Order_UG.txt
index 2771af1..a4190bc 100644
--- a/test/SWMR_POSIX_Order_UG.txt
+++ b/test/SWMR_POSIX_Order_UG.txt
@@ -3,7 +3,7 @@ POSIX Write Order Test Instructions
Purpose
-------
-This documents shows the requirments, implementaion design and instructions
+This documents shows the requirements, implementation design and instructions
of building and running the POSIX Write Order test. The name of the
test is twriteorder and it resides in the test/ directory.
@@ -44,7 +44,7 @@ and they are ready for the reader processes to access.
If the system, in which the writer and reader processes run, the readers
will always get all chain-linked blocks correctly. If the order of write
-is not maintained, some reader processes may found unexpect block data.
+is not maintained, some reader processes may found unexpected block data.
Building the Tests
------------------
diff --git a/test/SWMR_UseCase_UG.txt b/test/SWMR_UseCase_UG.txt
index 1e3d1e6..677fdc4 100644
--- a/test/SWMR_UseCase_UG.txt
+++ b/test/SWMR_UseCase_UG.txt
@@ -2,7 +2,7 @@
User Guide for SWMR Use Case Programs
2. Purpose:
- This is a User Guide of the SWMR Use Case programs. It descibes the use
+ This is a User Guide of the SWMR Use Case programs. It describes the use
case program and explain how to run them.
2.1. Author and Dates:
@@ -87,7 +87,7 @@
It opens the HDF5 data file without the SWMR access mode (0 means
off). This likely will result in error. This option is provided for
- users to see the effect of the neede SWMR access mode for concurrent
+ users to see the effect of the needed SWMR access mode for concurrent
access.
3.4. Test Shell Script:
@@ -176,7 +176,7 @@
It opens the HDF5 data file without the SWMR access mode (0 means
off). This likely will result in error. This option is provided for
- users to see the effect of the neede SWMR access mode for concurrent
+ users to see the effect of the needed SWMR access mode for concurrent
access.
4.4. Test Shell Script:
diff --git a/test/accum.c b/test/accum.c
index f098021..56080a3 100644
--- a/test/accum.c
+++ b/test/accum.c
@@ -1,4 +1,5 @@
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
* Copyright by the Board of Trustees of the University of Illinois. *
* All rights reserved. *
* *
@@ -1340,7 +1341,7 @@ test_accum_adjust(H5F_t *f)
/* ==> Accumulator will try to resize, but see that it's getting too big */
/* ==> Size of new block is less than than half maximum size of accumulator */
/* ==> New block being appended to accumulator */
- /* ==> We can slide the dirty region down, to accomodate the request */
+ /* ==> We can slide the dirty region down, to accommodate the request */
/* ==> Max Buffer Size - (dirty offset + adjust size) >= 2 * size) */
/* ==> Need to adjust location of accumulator while appending */
/* ==> Accumulator will need to be reallocated */
@@ -1386,7 +1387,7 @@ test_accum_adjust(H5F_t *f)
/* ==> Accumulator will try to resize, but see that it's getting too big */
/* ==> Size of new block is less than than half maximum size of accumulator */
/* ==> New block being appended to accumulator */
- /* ==> We can slide the dirty region down, to accomodate the request */
+ /* ==> We can slide the dirty region down, to accommodate the request */
/* ==> Max Buffer Size - (dirty offset + adjust size) < 2 * size) */
/* ==> Need to adjust location of accumulator while appending */
if (accum_write((1024 * 1024) - 5, 10, wbuf) < 0)
diff --git a/test/accum_swmr_reader.c b/test/accum_swmr_reader.c
index 5d78f1c..3dd80dd 100644
--- a/test/accum_swmr_reader.c
+++ b/test/accum_swmr_reader.c
@@ -1,4 +1,5 @@
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
* Copyright by the Board of Trustees of the University of Illinois. *
* All rights reserved. *
* *
diff --git a/test/atomic_writer.c b/test/atomic_writer.c
index 5a6310a..33252f1 100644
--- a/test/atomic_writer.c
+++ b/test/atomic_writer.c
@@ -112,7 +112,7 @@ int
main(int argc, char *argv[])
{
int fd = -1; /* file descriptor */
- ssize_t bytes_wrote; /* the nubmer of bytes written */
+ ssize_t bytes_wrote; /* the number of bytes written */
unsigned int *buf = NULL; /* buffer to hold written data */
unsigned int n, u, i; /* local index variable */
int temp; /* temporary variable */
diff --git a/test/big.c b/test/big.c
index 6d849f1..a9a518a 100644
--- a/test/big.c
+++ b/test/big.c
@@ -43,7 +43,7 @@
* that disk space is allocated only when the contents are actually written.
* E.g., If one creates a new file, seeks forward 10 million bytes, writes
* 1 bytes and closes the file, then a sparse file, will show file size of
- * 10 million bytes but actaully uses only couple disk blocks, much smaller
+ * 10 million bytes but actually uses only couple disk blocks, much smaller
* than the formal file size.)
*
* One more consideration is that we want to distinguish an HDF5 library
@@ -70,7 +70,7 @@
#endif
/* Define Small, Large, Extra Large, Huge File which
- * corrspond to less than 2GB, 2GB, 4GB, and tens of GB file size.
+ * correspond to less than 2GB, 2GB, 4GB, and tens of GB file size.
* NO_FILE stands for "no file" to be tested.
*/
typedef enum fsizes_t { SFILE, LFILE, XLFILE, HUGEFILE, NO_FILE } fsizes_t;
@@ -607,9 +607,9 @@ usage(void)
"\t-fsize\tChange family size default to <fsize> where <fsize> is\n"
"\t\ta positive float point number. Default value is %" PRIuHSIZE ".\n"
"Examples:\n"
- "\tbig -fsize 2.1e9 \t# test with file size just under 2GB\n"
- "\tbig -fsize 2.2e9 \t# test with file size just above 2GB\n"
- "\tBe sure the file system can support the file size requested\n",
+ "\t big -fsize 2.1e9 \t# test with file size just under 2GB\n"
+ "\t big -fsize 2.2e9 \t# test with file size just above 2GB\n"
+ "\t Be sure the file system can support the file size requested\n",
(hsize_t)FAMILY_SIZE);
}
diff --git a/test/btree2.c b/test/btree2.c
index ee43f5f..e04a5b4 100644
--- a/test/btree2.c
+++ b/test/btree2.c
@@ -696,7 +696,7 @@ test_insert_basic(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_
if (H5B2_insert(bt2, &record) < 0)
FAIL_STACK_ERROR
- /* Attempt to find non-existant record in B-tree with 1 record */
+ /* Attempt to find non-existent record in B-tree with 1 record */
/* (Should not be found, but not fail) */
idx = 41;
if (H5B2_find(bt2, &idx, find_cb, &idx) != FALSE)
@@ -707,7 +707,7 @@ test_insert_basic(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_
if (H5B2_find(bt2, &idx, NULL, NULL) != FALSE)
TEST_ERROR
- /* Attempt to find existant record in B-tree with 1 record */
+ /* Attempt to find existent record in B-tree with 1 record */
idx = 42;
if (H5B2_find(bt2, &idx, find_cb, &idx) != TRUE)
TEST_ERROR
@@ -716,7 +716,7 @@ test_insert_basic(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_
if (H5B2_find(bt2, &idx, NULL, NULL) != TRUE)
TEST_ERROR
- /* Attempt to index non-existant record in B-tree with 1 record */
+ /* Attempt to index non-existent record in B-tree with 1 record */
idx = 0;
H5E_BEGIN_TRY
{
@@ -764,18 +764,18 @@ test_insert_basic(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_
if (H5B2_insert(bt2, &record) < 0)
FAIL_STACK_ERROR
- /* Attempt to find non-existant record in level-0 B-tree with several records */
+ /* Attempt to find non-existent record in level-0 B-tree with several records */
/* (Should not be found, but not fail) */
idx = 41;
if (H5B2_find(bt2, &idx, find_cb, &idx) != FALSE)
TEST_ERROR
- /* Attempt to find existant record in level-0 B-tree with several record */
+ /* Attempt to find existent record in level-0 B-tree with several record */
idx = 56;
if (H5B2_find(bt2, &idx, find_cb, &idx) != TRUE)
TEST_ERROR
- /* Attempt to index non-existant record in B-tree with several records */
+ /* Attempt to index non-existent record in B-tree with several records */
idx = 0;
H5E_BEGIN_TRY
{
@@ -935,23 +935,23 @@ test_insert_split_root(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_p
if (idx != (INSERT_SPLIT_ROOT_NREC + 2))
TEST_ERROR
- /* Attempt to find non-existant record in level-1 B-tree */
+ /* Attempt to find non-existent record in level-1 B-tree */
/* (Should not be found, but not fail) */
idx = INSERT_SPLIT_ROOT_NREC + 10;
if (H5B2_find(bt2, &idx, find_cb, &idx) != FALSE)
TEST_ERROR
- /* Attempt to find existant record in root of level-1 B-tree */
+ /* Attempt to find existent record in root of level-1 B-tree */
idx = 33;
if (H5B2_find(bt2, &idx, find_cb, &idx) != TRUE)
FAIL_STACK_ERROR
- /* Attempt to find existant record in leaf of level-1 B-tree */
+ /* Attempt to find existent record in leaf of level-1 B-tree */
idx = 56;
if (H5B2_find(bt2, &idx, find_cb, &idx) != TRUE)
FAIL_STACK_ERROR
- /* Attempt to index non-existant record in level-1 B-tree */
+ /* Attempt to index non-existent record in level-1 B-tree */
idx = 0;
H5E_BEGIN_TRY
{
@@ -1671,13 +1671,13 @@ test_insert_make_level2(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_
if (idx != ((INSERT_SPLIT_ROOT_NREC * 29) + 5))
TEST_ERROR
- /* Attempt to find non-existant record in level-2 B-tree */
+ /* Attempt to find non-existent record in level-2 B-tree */
/* (Should not be found, but not fail) */
idx = INSERT_SPLIT_ROOT_NREC * 30;
if (H5B2_find(bt2, &idx, find_cb, &idx) != FALSE)
TEST_ERROR
- /* Attempt to find existant record in root of level-2 B-tree */
+ /* Attempt to find existent record in root of level-2 B-tree */
idx = 948;
if (H5B2_find(bt2, &idx, find_cb, &idx) != TRUE)
FAIL_STACK_ERROR
@@ -1687,7 +1687,7 @@ test_insert_make_level2(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_
if (check_node_depth(bt2, &record, (unsigned)2) < 0)
TEST_ERROR
- /* Attempt to find existant record in internal node of level-2 B-tree */
+ /* Attempt to find existent record in internal node of level-2 B-tree */
idx = 505;
if (H5B2_find(bt2, &idx, find_cb, &idx) != TRUE)
FAIL_STACK_ERROR
@@ -1697,7 +1697,7 @@ test_insert_make_level2(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_
if (check_node_depth(bt2, &record, (unsigned)1) < 0)
TEST_ERROR
- /* Attempt to find existant record in leaf of level-2 B-tree */
+ /* Attempt to find existent record in leaf of level-2 B-tree */
idx = 555;
if (H5B2_find(bt2, &idx, find_cb, &idx) != TRUE)
FAIL_STACK_ERROR
@@ -1707,7 +1707,7 @@ test_insert_make_level2(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_
if (check_node_depth(bt2, &record, (unsigned)0) < 0)
TEST_ERROR
- /* Attempt to index non-existant record in level-2 B-tree */
+ /* Attempt to index non-existent record in level-2 B-tree */
idx = 0;
H5E_BEGIN_TRY
{
@@ -3060,7 +3060,7 @@ HDfprintf(stderr,"curr_time=%lu\n",(unsigned long)curr_time);
if (idx != INSERT_MANY)
TEST_ERROR
- /* Attempt to find non-existant record in level-4 B-tree */
+ /* Attempt to find non-existent record in level-4 B-tree */
/* (Should not be found, but not fail) */
idx = INSERT_MANY * 2;
if (H5B2_find(bt2, &idx, find_cb, &idx) != FALSE)
@@ -3071,12 +3071,12 @@ HDfprintf(stderr,"curr_time=%lu\n",(unsigned long)curr_time);
/* Pick random record */
idx = (hsize_t)(HDrandom() % INSERT_MANY);
- /* Attempt to find existant record in root of level-4 B-tree */
+ /* Attempt to find existent record in root of level-4 B-tree */
if (H5B2_find(bt2, &idx, find_cb, &idx) != TRUE)
FAIL_STACK_ERROR
} /* end for */
- /* Attempt to index non-existant record in level-4 B-tree, in increasing & decreasing order */
+ /* Attempt to index non-existent record in level-4 B-tree, in increasing & decreasing order */
H5E_BEGIN_TRY
{
ret = H5B2_index(bt2, H5_ITER_INC, (hsize_t)(INSERT_MANY * 3), find_cb, NULL);
@@ -3099,12 +3099,12 @@ HDfprintf(stderr,"curr_time=%lu\n",(unsigned long)curr_time);
/* Pick random record */
idx = (hsize_t)(HDrandom() % INSERT_MANY);
- /* Attempt to find existant record in root of level-4 B-tree */
+ /* Attempt to find existent record in root of level-4 B-tree */
/* (in increasing order) */
if (H5B2_index(bt2, H5_ITER_INC, idx, find_cb, &idx) < 0)
FAIL_STACK_ERROR
- /* Attempt to find existant record in root of level-4 B-tree */
+ /* Attempt to find existent record in root of level-4 B-tree */
/* (in decreasing order) */
if (H5B2_index(bt2, H5_ITER_DEC, idx, find_dec_cb, &idx) < 0)
FAIL_STACK_ERROR
@@ -3210,7 +3210,7 @@ test_update_basic(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_
if (H5B2_update(bt2, &record, no_modify_cb, NULL) < 0)
FAIL_STACK_ERROR
- /* Attempt to find non-existant record in B-tree with 1 record */
+ /* Attempt to find non-existent record in B-tree with 1 record */
/* (Should not be found, but not fail) */
find.key = 10;
find.val = (hsize_t)-1;
@@ -3226,7 +3226,7 @@ test_update_basic(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_
if (find.val != (hsize_t)-1)
TEST_ERROR
- /* Attempt to find existant record in B-tree with 1 record */
+ /* Attempt to find existent record in B-tree with 1 record */
find.key = 42;
if (H5B2_find(bt2, &find, find_rec_cb, &find) != TRUE)
FAIL_STACK_ERROR
@@ -3240,7 +3240,7 @@ test_update_basic(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_
if (find.val != (hsize_t)-1)
TEST_ERROR
- /* Attempt to index non-existant record in B-tree with 1 record */
+ /* Attempt to index non-existent record in B-tree with 1 record */
H5E_BEGIN_TRY
{
ret = H5B2_index(bt2, H5_ITER_INC, (hsize_t)1, index_rec_cb, NULL);
@@ -3275,7 +3275,7 @@ test_update_basic(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_
if (H5B2_update(bt2, &modify, modify_rec_cb, &modify) < 0)
FAIL_STACK_ERROR
- /* Attempt to find non-existant record in B-tree with 1 record */
+ /* Attempt to find non-existent record in B-tree with 1 record */
/* (Should not be found, but not fail) */
find.key = 10;
find.val = (hsize_t)-1;
@@ -3305,7 +3305,7 @@ test_update_basic(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_
if (find.val != (hsize_t)-1)
TEST_ERROR
- /* Attempt to index non-existant record in B-tree with 1 record */
+ /* Attempt to index non-existent record in B-tree with 1 record */
H5E_BEGIN_TRY
{
ret = H5B2_index(bt2, H5_ITER_INC, (hsize_t)1, index_rec_cb, NULL);
@@ -3360,7 +3360,7 @@ test_update_basic(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_
if (H5B2_update(bt2, &record, no_modify_cb, NULL) < 0)
FAIL_STACK_ERROR
- /* Attempt to find non-existant record in level-0 B-tree with several records */
+ /* Attempt to find non-existent record in level-0 B-tree with several records */
/* (Should not be found, but not fail) */
find.key = 10;
find.val = (hsize_t)-1;
@@ -3369,14 +3369,14 @@ test_update_basic(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_
if (find.val != (hsize_t)-1)
TEST_ERROR
- /* Attempt to find existant record in level-0 B-tree with several records */
+ /* Attempt to find existent record in level-0 B-tree with several records */
find.key = 56;
if (H5B2_find(bt2, &find, find_rec_cb, &find) != TRUE)
TEST_ERROR
if (find.val != 12)
TEST_ERROR
- /* Attempt to index non-existant record in B-tree with several records */
+ /* Attempt to index non-existent record in B-tree with several records */
H5E_BEGIN_TRY
{
ret = H5B2_index(bt2, H5_ITER_INC, (hsize_t)4, index_rec_cb, NULL);
@@ -3448,7 +3448,7 @@ test_update_basic(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_
if (H5B2_update(bt2, &record, modify_rec_cb, &modify) < 0)
FAIL_STACK_ERROR
- /* Attempt to find non-existant record in level-0 B-tree with several records */
+ /* Attempt to find non-existent record in level-0 B-tree with several records */
/* (Should not be found, but not fail) */
find.key = 41;
find.val = (hsize_t)-1;
@@ -3457,14 +3457,14 @@ test_update_basic(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_
if (find.val != (hsize_t)-1)
TEST_ERROR
- /* Attempt to find existant record in level-0 B-tree with several record */
+ /* Attempt to find existent record in level-0 B-tree with several record */
find.key = 56;
if (H5B2_find(bt2, &find, find_rec_cb, &find) != TRUE)
TEST_ERROR
if (find.val != 22)
TEST_ERROR
- /* Attempt to index non-existant record in B-tree with several records */
+ /* Attempt to index non-existent record in B-tree with several records */
H5E_BEGIN_TRY
{
ret = H5B2_index(bt2, H5_ITER_INC, (hsize_t)4, index_rec_cb, NULL);
@@ -3692,7 +3692,7 @@ test_update_split_root(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_p
if (idx.key != (INSERT_SPLIT_ROOT_NREC_REC + 2))
TEST_ERROR
- /* Attempt to find non-existant record in level-1 B-tree */
+ /* Attempt to find non-existent record in level-1 B-tree */
/* (Should not be found, but not fail) */
find.key = 800;
find.val = (hsize_t)-1;
@@ -3701,7 +3701,7 @@ test_update_split_root(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_p
if (find.val != (hsize_t)-1)
TEST_ERROR
- /* Attempt to find existant record in root of level-1 B-tree */
+ /* Attempt to find existent record in root of level-1 B-tree */
find.key = 33;
find.val = (hsize_t)-1;
if (H5B2_find(bt2, &find, find_rec_cb, &find) != TRUE)
@@ -3711,7 +3711,7 @@ test_update_split_root(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_p
if (find.val != 67)
TEST_ERROR
- /* Attempt to find existant record in leaf of level-1 B-tree */
+ /* Attempt to find existent record in leaf of level-1 B-tree */
find.key = 56;
find.val = (hsize_t)-1;
if (H5B2_find(bt2, &find, find_rec_cb, &find) != TRUE)
@@ -3721,7 +3721,7 @@ test_update_split_root(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_p
if (find.val != 113)
TEST_ERROR
- /* Attempt to index non-existant record in level-1 B-tree */
+ /* Attempt to index non-existent record in level-1 B-tree */
H5E_BEGIN_TRY
{
ret = H5B2_index(bt2, H5_ITER_INC, (hsize_t)(INSERT_SPLIT_ROOT_NREC_REC + 2), index_rec_cb, NULL);
@@ -4478,7 +4478,7 @@ test_update_make_level2(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_
if (idx.key != ((INSERT_SPLIT_ROOT_NREC_REC * 41) + 4))
TEST_ERROR
- /* Attempt to find non-existant record in level-2 B-tree */
+ /* Attempt to find non-existent record in level-2 B-tree */
/* (Should not be found, but not fail) */
find.key = INSERT_SPLIT_ROOT_NREC_REC * 42;
find.val = (hsize_t)-1;
@@ -4487,7 +4487,7 @@ test_update_make_level2(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_
if (find.val != (hsize_t)-1)
TEST_ERROR
- /* Attempt to find existant record in root of level-2 B-tree */
+ /* Attempt to find existent record in root of level-2 B-tree */
find.key = 1347;
find.val = (hsize_t)-1;
if (H5B2_find(bt2, &find, find_rec_cb, &find) != TRUE)
@@ -4500,7 +4500,7 @@ test_update_make_level2(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_
if (check_node_depth(bt2, &record, (unsigned)2) < 0)
TEST_ERROR
- /* Attempt to find existant record in internal node of level-2 B-tree */
+ /* Attempt to find existent record in internal node of level-2 B-tree */
find.key = 513;
find.val = (hsize_t)-1;
if (H5B2_find(bt2, &find, find_rec_cb, &find) != TRUE)
@@ -4513,7 +4513,7 @@ test_update_make_level2(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_
if (check_node_depth(bt2, &record, (unsigned)1) < 0)
TEST_ERROR
- /* Attempt to find existant record in leaf of level-2 B-tree */
+ /* Attempt to find existent record in leaf of level-2 B-tree */
find.key = 555;
find.val = (hsize_t)-1;
if (H5B2_find(bt2, &find, find_rec_cb, &find) != TRUE)
@@ -4526,7 +4526,7 @@ test_update_make_level2(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_
if (check_node_depth(bt2, &record, (unsigned)0) < 0)
TEST_ERROR
- /* Attempt to index non-existant record in level-2 B-tree */
+ /* Attempt to index non-existent record in level-2 B-tree */
H5E_BEGIN_TRY
{
ret = H5B2_index(bt2, H5_ITER_INC, (hsize_t)(INSERT_SPLIT_ROOT_NREC_REC * 42), index_rec_cb, NULL);
@@ -4643,7 +4643,7 @@ test_update_make_level2(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_
if (idx.key != ((INSERT_SPLIT_ROOT_NREC_REC * 41) + 4))
TEST_ERROR
- /* Attempt to find non-existant record in level-2 B-tree */
+ /* Attempt to find non-existent record in level-2 B-tree */
/* (Should not be found, but not fail) */
find.key = INSERT_SPLIT_ROOT_NREC_REC * 42;
find.val = (hsize_t)-1;
@@ -4652,7 +4652,7 @@ test_update_make_level2(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_
if (find.val != (hsize_t)-1)
TEST_ERROR
- /* Attempt to find existant record in root of level-2 B-tree */
+ /* Attempt to find existent record in root of level-2 B-tree */
find.key = 1344;
find.val = (hsize_t)-1;
if (H5B2_find(bt2, &find, find_rec_cb, &find) != TRUE)
@@ -4665,7 +4665,7 @@ test_update_make_level2(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_
if (check_node_depth(bt2, &record, (unsigned)2) < 0)
TEST_ERROR
- /* Attempt to find existant record in internal node of level-2 B-tree */
+ /* Attempt to find existent record in internal node of level-2 B-tree */
find.key = 512;
find.val = (hsize_t)-1;
if (H5B2_find(bt2, &find, find_rec_cb, &find) != TRUE)
@@ -4678,7 +4678,7 @@ test_update_make_level2(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_
if (check_node_depth(bt2, &record, (unsigned)1) < 0)
TEST_ERROR
- /* Attempt to find existant record in leaf of level-2 B-tree */
+ /* Attempt to find existent record in leaf of level-2 B-tree */
find.key = 555;
find.val = (hsize_t)-1;
if (H5B2_find(bt2, &find, find_rec_cb, &find) != TRUE)
@@ -4691,7 +4691,7 @@ test_update_make_level2(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_
if (check_node_depth(bt2, &record, (unsigned)0) < 0)
TEST_ERROR
- /* Attempt to index non-existant record in level-2 B-tree */
+ /* Attempt to index non-existent record in level-2 B-tree */
H5E_BEGIN_TRY
{
ret = H5B2_index(bt2, H5_ITER_INC, (hsize_t)(INSERT_SPLIT_ROOT_NREC_REC * 42), index_rec_cb, NULL);
@@ -4818,7 +4818,7 @@ test_update_make_level2(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_
if (idx.key != ((INSERT_SPLIT_ROOT_NREC_REC * 41) + 6))
TEST_ERROR
- /* Attempt to find non-existant record in level-2 B-tree */
+ /* Attempt to find non-existent record in level-2 B-tree */
/* (Should not be found, but not fail) */
find.key = INSERT_SPLIT_ROOT_NREC_REC * 42;
find.val = (hsize_t)-1;
@@ -4827,7 +4827,7 @@ test_update_make_level2(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_
if (find.val != (hsize_t)-1)
TEST_ERROR
- /* Attempt to find existant record in root of level-2 B-tree */
+ /* Attempt to find existent record in root of level-2 B-tree */
find.key = 1345;
find.val = (hsize_t)-1;
if (H5B2_find(bt2, &find, find_rec_cb, &find) != TRUE)
@@ -4840,7 +4840,7 @@ test_update_make_level2(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_
if (check_node_depth(bt2, &record, (unsigned)2) < 0)
TEST_ERROR
- /* Attempt to find existant record in internal node of level-2 B-tree */
+ /* Attempt to find existent record in internal node of level-2 B-tree */
find.key = 513;
find.val = (hsize_t)-1;
if (H5B2_find(bt2, &find, find_rec_cb, &find) != TRUE)
@@ -4853,7 +4853,7 @@ test_update_make_level2(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_
if (check_node_depth(bt2, &record, (unsigned)1) < 0)
TEST_ERROR
- /* Attempt to find existant record in leaf of level-2 B-tree */
+ /* Attempt to find existent record in leaf of level-2 B-tree */
find.key = 555;
find.val = (hsize_t)-1;
if (H5B2_find(bt2, &find, find_rec_cb, &find) != TRUE)
@@ -4866,7 +4866,7 @@ test_update_make_level2(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_
if (check_node_depth(bt2, &record, (unsigned)0) < 0)
TEST_ERROR
- /* Attempt to index non-existant record in level-2 B-tree */
+ /* Attempt to index non-existent record in level-2 B-tree */
H5E_BEGIN_TRY
{
ret = H5B2_index(bt2, H5_ITER_INC, (hsize_t)(INSERT_SPLIT_ROOT_NREC_REC * 42), index_rec_cb, NULL);
@@ -5051,7 +5051,7 @@ HDfprintf(stderr, "curr_time = %lu\n", (unsigned long)curr_time);
if (iter.key != INSERT_MANY_REC)
TEST_ERROR
- /* Attempt to find non-existant record in level-4 B-tree */
+ /* Attempt to find non-existent record in level-4 B-tree */
/* (Should not be found, but not fail) */
find.key = INSERT_MANY_REC * 2;
find.val = (hsize_t)-1;
@@ -5066,14 +5066,14 @@ HDfprintf(stderr, "curr_time = %lu\n", (unsigned long)curr_time);
find.key = (hsize_t)(HDrandom() % INSERT_MANY_REC);
find.val = (hsize_t)-1;
- /* Attempt to find existant record in level-4 B-tree */
+ /* Attempt to find existent record in level-4 B-tree */
if (H5B2_find(bt2, &find, find_rec_cb, &find) != TRUE)
FAIL_STACK_ERROR
if (find.val != (find.key * 2))
TEST_ERROR
} /* end for */
- /* Attempt to index non-existant record in level-4 B-tree, in increasing & decreasing order */
+ /* Attempt to index non-existent record in level-4 B-tree, in increasing & decreasing order */
H5E_BEGIN_TRY
{
ret = H5B2_index(bt2, H5_ITER_INC, (hsize_t)(INSERT_MANY_REC * 3), find_rec_cb, NULL);
@@ -5102,7 +5102,7 @@ HDfprintf(stderr, "curr_time = %lu\n", (unsigned long)curr_time);
find.key = (hsize_t)-1;
find.val = (hsize_t)-1;
- /* Attempt to find existant record in level-4 B-tree */
+ /* Attempt to find existent record in level-4 B-tree */
/* (in increasing order) */
if (H5B2_index(bt2, H5_ITER_INC, idx, index_rec_cb, &find) < 0)
FAIL_STACK_ERROR
@@ -5115,7 +5115,7 @@ HDfprintf(stderr, "curr_time = %lu\n", (unsigned long)curr_time);
find.key = (hsize_t)-1;
find.val = (hsize_t)-1;
- /* Attempt to find existant record in level-4 B-tree */
+ /* Attempt to find existent record in level-4 B-tree */
/* (in decreasing order) */
if (H5B2_index(bt2, H5_ITER_DEC, idx, index_rec_cb, &find) < 0)
FAIL_STACK_ERROR
@@ -5234,7 +5234,7 @@ test_remove_basic(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_
PASSED();
- TESTING("B-tree remove: non-existant record from 1 record B-tree");
+ TESTING("B-tree remove: non-existent record from 1 record B-tree");
/* Check for closing & re-opening the B-tree */
if (reopen_btree(f, &bt2, bt2_addr, tparam) < 0)
@@ -5261,7 +5261,7 @@ test_remove_basic(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_
if (!H5F_addr_defined(root_addr))
TEST_ERROR
- /* Attempt to remove a non-existant record from a B-tree with 1 record */
+ /* Attempt to remove a non-existent record from a B-tree with 1 record */
record = 0;
H5E_BEGIN_TRY
{
@@ -5275,7 +5275,7 @@ test_remove_basic(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_
PASSED();
/* Attempt to remove a record from a B-tree with 1 record */
- TESTING("B-tree remove: existant record from 1 record B-tree");
+ TESTING("B-tree remove: existent record from 1 record B-tree");
/* Check for closing & re-opening the B-tree */
if (reopen_btree(f, &bt2, bt2_addr, tparam) < 0)
@@ -5339,8 +5339,8 @@ test_remove_basic(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_
PASSED();
- /* Attempt to remove a non-existant record from a level-0 B-tree with mult. record */
- TESTING("B-tree remove: non-existant record from level-0 B-tree");
+ /* Attempt to remove a non-existent record from a level-0 B-tree with mult. record */
+ TESTING("B-tree remove: non-existent record from level-0 B-tree");
/* Check for closing & re-opening the B-tree */
if (reopen_btree(f, &bt2, bt2_addr, tparam) < 0)
@@ -5359,7 +5359,7 @@ test_remove_basic(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_
PASSED();
/* Attempt to remove a record from a level-0 B-tree with mult. record */
- TESTING("B-tree remove: mult. existant records from level-0 B-tree");
+ TESTING("B-tree remove: mult. existent records from level-0 B-tree");
/* Check for closing & re-opening the B-tree */
if (reopen_btree(f, &bt2, bt2_addr, tparam) < 0)
@@ -5517,7 +5517,7 @@ test_remove_level1_noredistrib(hid_t fapl, const H5B2_create_t *cparam, const bt
herr_t ret; /* Generic error return value */
/* B-tree record removal tests */
- TESTING("B-tree remove: non-existant record from level-1 B-tree");
+ TESTING("B-tree remove: non-existent record from level-1 B-tree");
/* Create the file for the test */
if (create_file(&file, &f, fapl) < 0)
@@ -5550,7 +5550,7 @@ test_remove_level1_noredistrib(hid_t fapl, const H5B2_create_t *cparam, const bt
if (!H5F_addr_defined(root_addr))
TEST_ERROR
- /* Attempt to remove a non-existant record from a B-tree with 1 record */
+ /* Attempt to remove a non-existent record from a B-tree with 1 record */
record = (INSERT_SPLIT_ROOT_NREC * 2) + 1;
H5E_BEGIN_TRY
{
@@ -9605,7 +9605,7 @@ test_modify(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_t *tpa
/*
* Test modifying records
*/
- TESTING("B-tree modify: attempt to modify non-existant record");
+ TESTING("B-tree modify: attempt to modify non-existent record");
/* Create the file for the test */
if (create_file(&file, &f, fapl) < 0)
@@ -9628,7 +9628,7 @@ test_modify(hid_t fapl, const H5B2_create_t *cparam, const bt2_test_param_t *tpa
if (bt2_stat.depth != 2)
TEST_ERROR
- /* Attempt to modify a non-existant record */
+ /* Attempt to modify a non-existent record */
record = 3;
modify = 4;
H5E_BEGIN_TRY
diff --git a/test/cache.c b/test/cache.c
index 8ab5838..41be0d4 100644
--- a/test/cache.c
+++ b/test/cache.c
@@ -2789,8 +2789,8 @@ write_permitted_check(int
* Test the behaviour with different flags.
*
* This test was added primarily to test basic insert
- * pinned entry functionallity, but I through in explicit
- * tests for other functionallity that is tested implicitly
+ * pinned entry functionality, but I through in explicit
+ * tests for other functionality that is tested implicitly
* elsewhere.
*
* Return: void
@@ -2908,7 +2908,7 @@ check_insert_entry(unsigned paged)
}
}
- /* Thats all we can get from H5C_get_entry_status().
+ /* That's all we can get from H5C_get_entry_status().
* Now start looking at the cache data structures directly.
*/
@@ -5186,7 +5186,7 @@ check_flush_cache__flush_ops(H5F_t *file_ptr)
*
* 10/8/07 -- JRM
* Added a resize operation to this test to satisfy the new
- * requiremnt that any resize of an entry on flush will always
+ * requirement that any resize of an entry on flush will always
* be accompanied by a resize. Note that as a result, this
* test becomes redundant with later tests.
*/
@@ -5283,7 +5283,7 @@ check_flush_cache__flush_ops(H5F_t *file_ptr)
*
* 10/8/07 -- JRM
* Added a resize operation to this test to satisfy the new
- * requiremnt that any resize of an entry on flush will always
+ * requirement that any resize of an entry on flush will always
* be accompanied by a resize. Note that as a result, this
* test becomes redundant with later tests.
*/
@@ -8892,7 +8892,7 @@ check_flush_cache__flush_op_eviction_test(H5F_t *file_ptr)
*
* (VET, 9) Y 10 KB N N - dirty (VET, 8)
*
- * Recall that in this test bed, flush operations are excuted the
+ * Recall that in this test bed, flush operations are executed the
* first time the associated entry is flushed, and are then
* deleted.
*/
@@ -9445,7 +9445,7 @@ check_flush_cache__flush_op_eviction_test(H5F_t *file_ptr)
* returns to the serialize function for (VET, 9).
*
* When the serialize function for (VET, 9) is done with (VET, 8), it
- * calls H5C_unprotect(VET, 8), which markes (VET, 8) as dirty and
+ * calls H5C_unprotect(VET, 8), which marks (VET, 8) as dirty and
* unprotected, and places it at the head of the LRU.
*
* (VET, 0) is the next item on the LRU -- it is clean and is therefore
@@ -9846,7 +9846,7 @@ check_flush_cache__flush_op_eviction_test(H5F_t *file_ptr)
* Added tests for pinned entries.
*
* JRM -- 5/17/06
- * Complete reqrite of pinned entry tests to accomodate
+ * Complete rewrite of pinned entry tests to accommodate
* the new H5C_mark_entry_dirty() call.
*
*-------------------------------------------------------------------------
@@ -11842,7 +11842,7 @@ check_expunge_entry(unsigned paged)
* Function: check_multiple_read_protect()
*
* Purpose: Verify that multiple, simultaneous read protects of a
- * single entry perform as expectd.
+ * single entry perform as expected.
*
* Return: void
*
@@ -12188,7 +12188,7 @@ check_multiple_read_protect(unsigned paged)
if (pass && (entry_ptr->header.is_pinned)) {
pass = FALSE;
- failure_mssg = "enty (0,4) still pinned.\n";
+ failure_mssg = "entry (0,4) still pinned.\n";
}
unprotect_entry(file_ptr, 0, 4, /* (0,4) unprotect */
@@ -14391,7 +14391,7 @@ check_destroy_protected_err(unsigned paged)
* Function: check_duplicate_insert_err()
*
* Purpose: Verify that an attempt to insert and entry that is
- * alread in the cache will generate an error.
+ * already in the cache will generate an error.
*
* Return: void
*
@@ -16947,7 +16947,7 @@ check_auto_cache_resize(hbool_t cork_ageout, unsigned paged)
if (show_progress)
HDfprintf(stderr, "check point %d\n", checkpoint++);
- /* eigth epoch -- force the hit rate to 100% again -- should be steady
+ /* eight epoch -- force the hit rate to 100% again -- should be steady
* state.
*/
if (pass) {
@@ -17043,7 +17043,7 @@ check_auto_cache_resize(hbool_t cork_ageout, unsigned paged)
if (show_progress)
HDfprintf(stderr, "check point %d\n", checkpoint++);
- if (pass) { /* twelth epoch -- at minimum size so no more ageouts */
+ if (pass) { /* twelfth epoch -- at minimum size so no more ageouts */
rpt_fcn_called = FALSE;
i = 0;
while (pass && (i < 1000)) {
@@ -17324,7 +17324,7 @@ check_auto_cache_resize(hbool_t cork_ageout, unsigned paged)
if (show_progress)
HDfprintf(stderr, "check point %d\n", checkpoint++);
- /* eigth epoch -- still 100% hit rate
+ /* eighth epoch -- still 100% hit rate
*/
if (pass) {
@@ -17427,7 +17427,7 @@ check_auto_cache_resize(hbool_t cork_ageout, unsigned paged)
if (show_progress)
HDfprintf(stderr, "check point %d\n", checkpoint++);
- /* twelth epoch -- force the hit rate to 100% again -- should be steady
+ /* twelfth epoch -- force the hit rate to 100% again -- should be steady
* state.
*/
if (pass) {
@@ -17803,7 +17803,7 @@ check_auto_cache_resize(hbool_t cork_ageout, unsigned paged)
if (show_progress)
HDfprintf(stderr, "check point %d\n", checkpoint++);
- /* eigth epoch -- force the hit rate to 100% again -- should be steady
+ /* eighth epoch -- force the hit rate to 100% again -- should be steady
* state.
*/
if (pass) {
@@ -17903,7 +17903,7 @@ check_auto_cache_resize(hbool_t cork_ageout, unsigned paged)
if (show_progress)
HDfprintf(stderr, "check point %d\n", checkpoint++);
- if (pass) { /* twelth epoch -- at minimum size so no more ageouts */
+ if (pass) { /* twelfth epoch -- at minimum size so no more ageouts */
rpt_fcn_called = FALSE;
i = 0;
@@ -18554,7 +18554,7 @@ check_auto_cache_resize(hbool_t cork_ageout, unsigned paged)
if (show_progress)
HDfprintf(stderr, "check point %d\n", checkpoint++);
- /* twelth epoch -- hit rate 1.0 -- decrease as before.
+ /* twelfth epoch -- hit rate 1.0 -- decrease as before.
*/
if (pass) {
@@ -19567,7 +19567,7 @@ check_auto_cache_resize(hbool_t cork_ageout, unsigned paged)
if (show_progress)
HDfprintf(stderr, "check point %d\n", checkpoint++);
- /* protect and unprotect two more large entries -- shouldnt trigger a
+ /* protect and unprotect two more large entries -- shouldn't trigger a
* flash cache size increase.
*/
if (pass) {
@@ -22127,7 +22127,7 @@ check_auto_cache_resize_disable(unsigned paged)
* mode (add space), which is triggered whenever the size of a newly
* loaded or inserted entry, or the delta between old and new entry
* sizes exceeds some fraction of the current maximum cache size, and
- * the cache doesn't have enough free space to accomodate the new/
+ * the cache doesn't have enough free space to accommodate the new/
* resize entry without performing evictions. The range of permissible
* values for the flash_threshold (0.1 to 1.0 as of this writing), and
* for the flash_multiple (0.1 to 10.0) do not permit the facility to
@@ -22136,7 +22136,7 @@ check_auto_cache_resize_disable(unsigned paged)
* current max_cache_size equal to max_size.
*
* We have already tested the latter in check_auto_cache_resize(), so
- * we need only thest the former here. Do this by disabling flash
+ * we need only test the former here. Do this by disabling flash
* cache size increments via the flash_incr_mode, and then creating
* situations that would trigger flash cache size increases were that
* code enabled.
@@ -25209,7 +25209,7 @@ check_auto_cache_resize_input_errs(unsigned paged)
/*-------------------------------------------------------------------------
* Function: check_auto_cache_resize_aux_fcns()
*
- * Purpose: Verify that the auxilary functions associated with
+ * Purpose: Verify that the auxiliary functions associated with
* the automatic cache resize capability are operating
* correctly. These functions are:
*
@@ -25286,13 +25286,13 @@ check_auto_cache_resize_aux_fcns(unsigned paged)
};
if (paged)
- TESTING("automatic cache resize auxilary functions (paged aggregation)")
+ TESTING("automatic cache resize auxiliary functions (paged aggregation)")
else
- TESTING("automatic cache resize auxilary functions")
+ TESTING("automatic cache resize auxiliary functions")
pass = TRUE;
- /* allocate a cache, and then test the various auxilary functions.
+ /* allocate a cache, and then test the various auxiliary functions.
*/
if (pass) {
@@ -25814,7 +25814,7 @@ check_metadata_blizzard_absence(hbool_t fill_via_insertion, unsigned paged)
if (file_ptr == NULL) {
pass = FALSE;
- failure_mssg = "bad return from cache intialization.\n";
+ failure_mssg = "bad return from cache initialization.\n";
}
else
cache_ptr = file_ptr->shared->cache;
@@ -26033,7 +26033,7 @@ check_metadata_blizzard_absence(hbool_t fill_via_insertion, unsigned paged)
H5C__DIRTIED_FLAG); /* unsigned int flags */
}
- /* Expected staus is that after each insertion, the entry
+ /* Expected status is that after each insertion, the entry
* inserted 26 insertions ago has been flushed, and the
* entry currently getting inserted is now in the cache and
* dirty.
@@ -28440,7 +28440,7 @@ check_flush_deps_err(unsigned paged)
result =
H5C_destroy_flush_dependency(&((entries[entry_type])[0]), &((entries[entry_type])[1]));
if (result != FAIL)
- CACHE_ERROR("Destroying [non-existant] dependency when parent isn't protected")
+ CACHE_ERROR("Destroying [non-existent] dependency when parent isn't protected")
break;
/* Verify that parent entry has flush dependency */
@@ -31671,7 +31671,7 @@ check_metadata_cork(hbool_t fill_via_insertion, unsigned paged)
if (file_ptr == NULL) {
pass = FALSE;
- failure_mssg = "bad return from cache intialization.\n";
+ failure_mssg = "bad return from cache initialization.\n";
}
else
cache_ptr = file_ptr->shared->cache;
@@ -32183,7 +32183,7 @@ check_metadata_cork(hbool_t fill_via_insertion, unsigned paged)
* an entry that is no longer in the cache, and which may have
* been deleted.
*
- * This function contans tests for correct handling on this
+ * This function contains tests for correct handling on this
* situation.
*
* Do nothing if pass is FALSE on entry.
@@ -32440,7 +32440,7 @@ cedds__expunge_dirty_entry_in_flush_test(H5F_t *file_ptr)
*
* (HET, 3) Y 16 KB Y N - -
*
- * Recall that in this test bed, flush operations are excuted the
+ * Recall that in this test bed, flush operations are executed the
* first time the associated entry is flushed, and are then
* deleted.
*/
@@ -32694,7 +32694,7 @@ cedds__H5C_make_space_in_cache(H5F_t *file_ptr)
*
* (HET, 3) Y 16 KB N N - -
*
- * Recall that in this test bed, flush operations are excuted the
+ * Recall that in this test bed, flush operations are executed the
* first time the associated entry is flushed, and are then
* deleted.
*/
@@ -33112,7 +33112,7 @@ cedds__H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *file_ptr)
*
* (MET, 1-31) Y 64 KB N N - -
*
- * Recall that in this test bed, flush operations are excuted the
+ * Recall that in this test bed, flush operations are executed the
* first time the associated entry is flushed, and are then
* deleted.
*/
@@ -33479,7 +33479,7 @@ cedds__H5C_flush_invalidate_cache__bucket_scan(H5F_t *file_ptr)
if (pass) {
- /* load the entry that will have a flush dependencey with (MET, 0),
+ /* load the entry that will have a flush dependency with (MET, 0),
* thus preventing it from being flushed on the first pass through
* the skip list.
*/
diff --git a/test/cache_common.c b/test/cache_common.c
index 3be633d..f1fc73b 100644
--- a/test/cache_common.c
+++ b/test/cache_common.c
@@ -923,7 +923,7 @@ notify_image_len(const void *thing, size_t *image_length)
/*-------------------------------------------------------------------------
* Function: pre_serialize & friends
*
- * Purpose: Pre_serialize the supplied entry. For now this consistes of
+ * Purpose: Pre_serialize the supplied entry. For now this consists of
* executing any flush operations and loading the appropriate
* values into *new_addr_ptr, *new_len_ptr, and *flags_ptr.
*
@@ -1096,7 +1096,7 @@ notify_pre_serialize(H5F_t *f, void *thing, haddr_t addr, size_t len, haddr_t *n
/*-------------------------------------------------------------------------
* Function: serialize & friends
*
- * Purpose: Serialize the supplied entry. For now this consistes of
+ * Purpose: Serialize the supplied entry. For now this consists of
* loading the type and index of the entry into the first
* three bytes of the image (if it is long enough -- if not
* just load the low order byte of the index into the first
@@ -1538,7 +1538,7 @@ add_flush_op(int target_type, int target_idx, int op_code, int type, int idx, hb
* Otherwise, set up a pinned entry dependency so we can
* test the pinned entry modifications to the flush routine.
*
- * Given the types and indicies of the pinned and pinning
+ * Given the types and indices of the pinned and pinning
* entries, add the pinned entry to the list of pinned
* entries in the pinning entry, increment the
* pinning reference count of the pinned entry, and
@@ -1777,7 +1777,7 @@ execute_flush_op(H5F_t *file_ptr, struct test_entry_t *entry_ptr, struct flush_o
case FLUSH_OP__EXPUNGE:
/* the expunge flush op exists to allow us to simulate the
- * case in which an entry is removed from the cashe as the
+ * case in which an entry is removed from the cache as the
* the result of the flush of a second entry. At present,
* this can only happen via the take ownership flag, but
* we will make this test feature more general to as to make
@@ -2322,7 +2322,7 @@ verify_clean(void)
* Function: verify_entry_status
*
* Purpose: Verify that a list of entries have the expected status.
- * If any discrepencies are found, set the failure message
+ * If any discrepancies are found, set the failure message
* and set pass to FALSE.
*
* Do nothing if pass is FALSE on entry.
@@ -3833,8 +3833,8 @@ row_major_scan_forward(H5F_t *file_ptr, int32_t max_index, int32_t lag, hbool_t
HDassert((!cache_ptr->slist_enabled) ||
(cache_ptr->slist_size == cache_ptr->dirty_index_size));
- } /* end if */
- } /* end elsef */
+ }
+ }
if (verbose)
HDfprintf(stdout, "\n");
@@ -5242,7 +5242,7 @@ resize_configs_are_equal(const H5C_auto_size_ctl_t *a, const H5C_auto_size_ctl_t
* has both internal and external configuration matching
* *config_ptr.
*
- * Do nothin on success. On failure, set pass to FALSE, and
+ * Do nothing on success. On failure, set pass to FALSE, and
* load an error message into failue_mssg. Note that
* failure_msg is assumed to be at least 128 bytes in length.
*
diff --git a/test/cache_common.h b/test/cache_common.h
index 901f5c2..91aedc7 100644
--- a/test/cache_common.h
+++ b/test/cache_common.h
@@ -251,7 +251,7 @@ typedef struct flush_op {
typedef enum test_entry_action_t {
TEST_ENTRY_ACTION_NUL = 0, /* No action on entry */
- TEST_ENTRY_ACTION_MOVE /* Entry is beging moved */
+ TEST_ENTRY_ACTION_MOVE /* Entry is being moved */
} test_entry_action_t;
typedef struct test_entry_t {
@@ -370,7 +370,7 @@ typedef struct test_entry_t {
* been resized by a resize flush
* op, and the flush function has
* not yet returned, This field is
- * used to turn off overactive santity
+ * used to turn off overactive sanity
* checking code that would otherwise
* cause a false test failure.
*/
diff --git a/test/cache_image.c b/test/cache_image.c
index f2f7b51..4e8644f 100644
--- a/test/cache_image.c
+++ b/test/cache_image.c
@@ -136,7 +136,7 @@ create_datasets(hid_t file_id, int min_dset, int max_dset)
}
/* set the dataset creation plist to specify that the raw data is
- * to be partioned into 10X10 element chunks.
+ * to be partitioned into 10X10 element chunks.
*/
if (pass) {
@@ -470,7 +470,7 @@ delete_datasets(hid_t file_id, int min_dset, int max_dset)
* Set pass to FALSE and issue a suitable failure
* message if either the file contains a metadata cache image
* superblock extension and mdci_sbem_expected is TRUE, or
- * vise versa.
+ * vice versa.
*
* If mdci_sbem_expected is TRUE, also verify that the metadata
* cache has been advised of this.
@@ -544,7 +544,7 @@ open_hdf5_file(hbool_t create_file, hbool_t mdci_sbem_expected, hbool_t read_onl
if (show_progress)
HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
- /* create a file access propertly list. */
+ /* create a file access property list. */
if (pass) {
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -879,7 +879,7 @@ attempt_swmr_open_hdf5_file(const hbool_t create_file, const hbool_t set_mdci_fa
H5AC_cache_image_config_t cache_image_config = {H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION, TRUE, FALSE,
H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE};
- /* create a file access propertly list. */
+ /* create a file access property list. */
if (pass) {
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -1498,7 +1498,7 @@ check_cache_image_ctl_flow_1(void)
* processed as part of the first protect operation after the
* superblock is loaded.)
*
- * In this particular test, we preform the following operations:
+ * In this particular test, we perform the following operations:
*
* 1) Create a HDF5 file with the cache image FAPL entry.
*
@@ -6885,7 +6885,7 @@ cache_image_api_error_check_4(void)
* image.
*
* The objective of this test is to create a test file
- * with both non-empty self referential presistant
+ * with both non-empty self referential persistent
* free space managers, and a cache image, and then
* verify that this situation is handled correctly if
* H5Fget_free_sections() is called before the metadata
diff --git a/test/cache_tagging.c b/test/cache_tagging.c
index 8761ed5..583c665 100644
--- a/test/cache_tagging.c
+++ b/test/cache_tagging.c
@@ -441,7 +441,7 @@ check_file_creation_tags(hid_t fcpl_id, int type)
/* Variable Declarations */
hid_t fid = -1; /* File Identifier */
#ifndef NDEBUG
- int verbose = FALSE; /* verbose test outout */
+ int verbose = FALSE; /* verbose test output */
#endif /* NDEBUG */ /* end debugging functions */
haddr_t root_tag = 0;
haddr_t sbe_tag = 0;
@@ -537,7 +537,7 @@ check_file_open_tags(hid_t fcpl, int type)
/* Variable Declarations */
hid_t fid = -1; /* File Identifier */
#ifndef NDEBUG
- int verbose = FALSE; /* verbose file outout */
+ int verbose = FALSE; /* verbose file output */
#endif /* NDEBUG */ /* end debugging functions */
haddr_t root_tag; /* Root Group Tag */
haddr_t sbe_tag; /* Sblock Extension Tag */
@@ -659,7 +659,7 @@ check_group_creation_tags(void)
hid_t fid = -1; /* File Identifier */
hid_t gid = -1; /* Group Identifier */
#ifndef NDEBUG
- int verbose = FALSE; /* verbose file outout */
+ int verbose = FALSE; /* verbose file output */
#endif /* NDEBUG */ /* end debugging functions */
haddr_t root_tag = HADDR_UNDEF; /* Root Group Tag */
haddr_t g_tag; /* Group Tag */
@@ -774,7 +774,7 @@ check_multi_group_creation_tags(void)
hid_t fid = -1; /* File Identifier */
hid_t gid = -1; /* Group Identifier */
#ifndef NDEBUG
- int verbose = FALSE; /* verbose file outout */
+ int verbose = FALSE; /* verbose file output */
#endif /* NDEBUG */ /* end debugging functions */
char gname[16]; /* group name buffer */
int i = 0; /* iterator */
@@ -924,7 +924,7 @@ check_link_iteration_tags(void)
hid_t sid = -1; /* Group Identifier */
hid_t did = -1; /* Group Identifier */
#ifndef NDEBUG
- int verbose = FALSE; /* verbose file outout */
+ int verbose = FALSE; /* verbose file output */
#endif /* NDEBUG */ /* end debugging functions */
int i = 0; /* iterator */
haddr_t root_tag = 0; /* Root Group Tag Value */
@@ -1058,7 +1058,7 @@ check_dense_attribute_tags(void)
hid_t did = -1; /* Group Identifier */
hid_t dcpl = -1; /* Group Identifier */
#ifndef NDEBUG
- int verbose = FALSE; /* verbose file outout */
+ int verbose = FALSE; /* verbose file output */
#endif /* NDEBUG */ /* end debugging functions */
int i = 0; /* iterator */
hid_t fapl = -1; /* File access property list */
@@ -1408,7 +1408,7 @@ check_attribute_creation_tags(hid_t fcpl, int type)
hid_t gid = -1; /* Group Identifier */
hid_t sid = -1; /* Dataspace Identifier */
#ifndef NDEBUG
- int verbose = FALSE; /* verbose file outout */
+ int verbose = FALSE; /* verbose file output */
#endif /* NDEBUG */ /* end debugging functions */
haddr_t root_tag = 0; /* Root group tag */
haddr_t g_tag = 0;
@@ -1566,7 +1566,7 @@ check_attribute_open_tags(hid_t fcpl, int type)
hid_t gid = -1; /* Group Identifier */
hid_t sid = -1; /* Dataspace Identifier */
#ifndef NDEBUG
- int verbose = FALSE; /* verbose file outout */
+ int verbose = FALSE; /* verbose file output */
#endif /* NDEBUG */ /* end debugging functions */
haddr_t root_tag = 0;
haddr_t g_tag = 0;
@@ -1725,7 +1725,7 @@ check_attribute_rename_tags(hid_t fcpl, int type)
hid_t aid = -1; /* Attribute Identifier */
hid_t sid = -1; /* Dataset Identifier */
#ifndef NDEBUG
- int verbose = FALSE; /* verbose file outout */
+ int verbose = FALSE; /* verbose file output */
#endif /* NDEBUG */ /* end debugging functions */
int *data = NULL; /* data buffer */
int i, j, k = 0; /* iterators */
@@ -1941,7 +1941,7 @@ check_attribute_delete_tags(hid_t fcpl, int type)
hid_t aid = -1; /* Attribute Identifier */
hid_t sid = -1; /* Dataset Identifier */
#ifndef NDEBUG
- int verbose = FALSE; /* verbose file outout */
+ int verbose = FALSE; /* verbose file output */
#endif /* NDEBUG */ /* end debugging functions */
int *data = NULL; /* data buffer */
int i, j, k = 0; /* iterators */
@@ -2121,7 +2121,7 @@ check_dataset_creation_tags(hid_t fcpl, int type)
hid_t did = -1; /* Dataset Identifier */
hid_t sid = -1; /* Dataspace Identifier */
#ifndef NDEBUG
- int verbose = FALSE; /* verbose file outout */
+ int verbose = FALSE; /* verbose file output */
#endif /* NDEBUG */ /* end debugging functions */
hid_t dcpl = -1; /* dataset creation pl */
hsize_t cdims[2] = {1, 1}; /* chunk dimensions */
@@ -2273,7 +2273,7 @@ check_dataset_creation_earlyalloc_tags(hid_t fcpl, int type)
hid_t did = -1; /* Dataset Identifier */
hid_t sid = -1; /* Dataspace Identifier */
#ifndef NDEBUG
- int verbose = FALSE; /* verbose file outout */
+ int verbose = FALSE; /* verbose file output */
#endif /* NDEBUG */ /* end debugging functions */
hid_t dcpl = -1; /* dataset creation pl */
hsize_t cdims[2] = {1, 1}; /* chunk dimensions */
@@ -2431,7 +2431,7 @@ check_dataset_open_tags(void)
hid_t did = -1; /* Dataset Identifier */
hid_t sid = -1; /* Dataspace Identifier */
#ifndef NDEBUG
- int verbose = FALSE; /* verbose file outout */
+ int verbose = FALSE; /* verbose file output */
#endif /* NDEBUG */ /* end debugging functions */
hid_t dcpl = -1; /* dataset creation pl */
hsize_t cdims[2] = {1, 1}; /* chunk dimensions */
@@ -2575,7 +2575,7 @@ check_dataset_write_tags(void)
hid_t did = -1; /* Dataset Identifier */
hid_t sid = -1; /* Dataspace Identifier */
#ifndef NDEBUG
- int verbose = FALSE; /* verbose file outout */
+ int verbose = FALSE; /* verbose file output */
#endif /* NDEBUG */ /* end debugging functions */
hid_t dcpl = -1; /* dataset creation pl */
hsize_t cdims[2] = {1, 1}; /* chunk dimensions */
@@ -2734,7 +2734,7 @@ check_attribute_write_tags(hid_t fcpl, int type)
hid_t aid = -1; /* Attribute Identifier */
hid_t sid = -1; /* Dataset Identifier */
#ifndef NDEBUG
- int verbose = FALSE; /* verbose file outout */
+ int verbose = FALSE; /* verbose file output */
#endif /* NDEBUG */ /* end debugging functions */
int *data = NULL; /* data buffer */
int i, j, k = 0; /* iterators */
@@ -2912,7 +2912,7 @@ check_dataset_read_tags(void)
hid_t did = -1; /* Dataset Identifier */
hid_t sid = -1; /* Dataspace Identifier */
#ifndef NDEBUG
- int verbose = FALSE; /* verbose file outout */
+ int verbose = FALSE; /* verbose file output */
#endif /* NDEBUG */ /* end debugging functions */
hid_t dcpl = -1; /* dataset creation pl */
hsize_t cdims[2] = {1, 1}; /* chunk dimensions */
@@ -3066,7 +3066,7 @@ check_dataset_size_retrieval(void)
hid_t did = -1; /* Dataset Identifier */
hid_t sid = -1; /* Dataspace Identifier */
#ifndef NDEBUG
- int verbose = FALSE; /* verbose file outout */
+ int verbose = FALSE; /* verbose file output */
#endif /* NDEBUG */ /* end debugging functions */
hid_t dcpl = -1; /* dataset creation pl */
hsize_t cdims[2] = {1, 1}; /* chunk dimensions */
@@ -3222,7 +3222,7 @@ check_dataset_extend_tags(void)
hid_t did = -1; /* Dataset Identifier */
hid_t sid = -1; /* Dataspace Identifier */
#ifndef NDEBUG
- int verbose = FALSE; /* verbose file outout */
+ int verbose = FALSE; /* verbose file output */
#endif /* NDEBUG */ /* end debugging functions */
hid_t dcpl = -1; /* dataset creation pl */
hsize_t cdims[2] = {1, 1}; /* chunk dimensions */
@@ -3642,7 +3642,7 @@ check_link_removal_tags(hid_t fcpl, int type)
hid_t sid = -1; /* Dataspace Identifier */
hid_t gid = -1; /* Dataspace Identifier */
#ifndef NDEBUG
- int verbose = FALSE; /* verbose file outout */
+ int verbose = FALSE; /* verbose file output */
#endif /* NDEBUG */ /* end debugging functions */
hid_t dcpl = -1; /* dataset creation pl */
hsize_t cdims[2] = {1, 1}; /* chunk dimensions */
@@ -3823,7 +3823,7 @@ check_link_getname_tags(void)
hid_t sid = -1; /* Dataspace Identifier */
hid_t gid = -1; /* Dataspace Identifier */
#ifndef NDEBUG
- int verbose = FALSE; /* verbose file outout */
+ int verbose = FALSE; /* verbose file output */
#endif /* NDEBUG */ /* end debugging functions */
hid_t dcpl = -1; /* dataset creation pl */
hsize_t cdims[2] = {1, 1}; /* chunk dimensions */
@@ -3992,7 +3992,7 @@ check_external_link_creation_tags(void)
hid_t fid2 = -1; /* File Identifier */
hid_t gid = -1; /* Dataspace Identifier */
#ifndef NDEBUG
- int verbose = FALSE; /* verbose file outout */
+ int verbose = FALSE; /* verbose file output */
#endif /* NDEBUG */ /* end debugging functions */
haddr_t root_tag = 0;
@@ -4111,7 +4111,7 @@ check_external_link_open_tags(void)
hid_t gid = -1; /* Dataspace Identifier */
hid_t xid = -1; /* Dataspace Identifier */
#ifndef NDEBUG
- int verbose = FALSE; /* verbose file outout */
+ int verbose = FALSE; /* verbose file output */
#endif /* NDEBUG */ /* end debugging functions */
haddr_t root_tag = 0;
haddr_t root2_tag = 0;
@@ -4378,7 +4378,7 @@ main(void)
/* Create a standard file creation property list */
fcpl_default = H5Pcreate(H5P_FILE_CREATE);
- /* Create an fcpl with shared messages and file space managment enabled */
+ /* Create an fcpl with shared messages and file space management enabled */
fcpl_shmesg_all = H5Pcreate(H5P_FILE_CREATE);
H5Pset_shared_mesg_nindexes(fcpl_shmesg_all, 1);
H5Pset_shared_mesg_index(fcpl_shmesg_all, 0, H5O_SHMESG_ALL_FLAG, 20);
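For readers following the fcpl setup above, a minimal standalone sketch of building the same kind of shared-message file creation property list (the output file name is a placeholder; the single index, the H5O_SHMESG_ALL_FLAG mask, and the 20-byte minimum mirror the test values):

#include "hdf5.h"

int
main(void)
{
    /* One shared-message index covering every shareable message type */
    hid_t fcpl = H5Pcreate(H5P_FILE_CREATE);
    hid_t fid  = -1;

    if (fcpl < 0)
        return 1;
    if (H5Pset_shared_mesg_nindexes(fcpl, 1) < 0)
        return 1;
    if (H5Pset_shared_mesg_index(fcpl, 0, H5O_SHMESG_ALL_FLAG, 20) < 0)
        return 1;

    /* Files created with this fcpl share object header messages of at
     * least 20 bytes through a dedicated shared-message index */
    if ((fid = H5Fcreate("shmesg_example.h5", H5F_ACC_TRUNC, fcpl, H5P_DEFAULT)) < 0)
        return 1;

    H5Fclose(fid);
    H5Pclose(fcpl);
    return 0;
}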
diff --git a/test/chunk_info.c b/test/chunk_info.c
index a0fdac5..10661c6 100644
--- a/test/chunk_info.c
+++ b/test/chunk_info.c
@@ -138,7 +138,7 @@ reinit_vars(unsigned *read_flt_msk, haddr_t *addr, hsize_t *size)
* Return: Success: SUCCEED
* Failure: FAIL
*
- * Note: Note that the dataspace argument in these new functions are
+ * Note: The dataspace argument in these new functions is
* currently not used. The functionality involved the dataspace
* will be implemented in the next version.
*
@@ -648,7 +648,7 @@ test_chunk_info_single_chunk(char *filename, hid_t fapl)
if (size != SINGLE_CHUNK_SIZE)
TEST_ERROR
- /* Release resourse */
+ /* Release resource */
if (H5Dclose(dset) < 0)
TEST_ERROR
if (H5Sclose(dspace) < 0)
@@ -906,7 +906,7 @@ test_chunk_info_implicit(char *filename, hid_t fapl)
if (read_each_chunk(dset, i * CHUNK_NX, j * CHUNK_NY, (void *)direct_buf[n]) < 0)
TEST_ERROR
- /* Release resourse */
+ /* Release resource */
if (H5Dclose(dset) < 0)
TEST_ERROR
if (H5Sclose(dspace) < 0)
@@ -1153,7 +1153,7 @@ test_chunk_info_fixed_array(char *filename, hid_t fapl)
if (read_each_chunk(dset, i * CHUNK_NX, j * CHUNK_NY, (void *)direct_buf[n]) < 0)
TEST_ERROR
- /* Release resourse */
+ /* Release resource */
if (H5Dclose(dset) < 0)
TEST_ERROR
if (H5Sclose(dspace) < 0)
@@ -1397,7 +1397,7 @@ test_chunk_info_extensible_array(char *filename, hid_t fapl)
if (read_each_chunk(dset, i * CHUNK_NX, j * CHUNK_NY, (void *)direct_buf[n]) < 0)
TEST_ERROR
- /* Release resourse */
+ /* Release resource */
if (H5Dclose(dset) < 0)
TEST_ERROR
if (H5Sclose(dspace) < 0)
@@ -1641,7 +1641,7 @@ test_chunk_info_version2_btrees(char *filename, hid_t fapl)
if (read_each_chunk(dset, i * CHUNK_NX, j * CHUNK_NY, (void *)direct_buf[n]) < 0)
TEST_ERROR
- /* Release resourse */
+ /* Release resource */
if (H5Dclose(dset) < 0)
TEST_ERROR
if (H5Sclose(dspace) < 0)
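As background for the note above about the unused dataspace argument, a hedged sketch of the chunk-query calls these tests exercise (available in HDF5 1.10.5 and later; the helper name and the choice to print only offset[0] are illustrative):

#include <stdio.h>
#include "hdf5.h"

/* Print the file address and stored size of every chunk of a chunked
 * dataset; a sketch only, with error handling reduced to early returns. */
static int
print_chunk_layout(hid_t dset)
{
    hid_t    dspace = H5Dget_space(dset); /* passed through, currently unused by the queries */
    hsize_t  nchunks = 0;
    hsize_t  offset[H5S_MAX_RANK];
    unsigned filter_mask;
    haddr_t  addr;
    hsize_t  size;
    hsize_t  i;

    if (dspace < 0 || H5Dget_num_chunks(dset, dspace, &nchunks) < 0)
        return -1;

    for (i = 0; i < nchunks; i++) {
        if (H5Dget_chunk_info(dset, dspace, i, offset, &filter_mask, &addr, &size) < 0)
            return -1;
        printf("chunk %llu: offset[0]=%llu addr=%llu size=%llu\n", (unsigned long long)i,
               (unsigned long long)offset[0], (unsigned long long)addr, (unsigned long long)size);
    }

    H5Sclose(dspace);
    return 0;
}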
diff --git a/test/cmpd_dset.c b/test/cmpd_dset.c
index d918bcc..f156655 100644
--- a/test/cmpd_dset.c
+++ b/test/cmpd_dset.c
@@ -713,7 +713,7 @@ test_compound(char *filename, hid_t fapl)
/*
*######################################################################
* Step 12: Write an array into the middle third of the dataset
- * initializeing only members `b' and `d' to -1.
+ * initializing only members `b' and `d' to -1.
*/
TESTING("hyperslab part initialized write");
diff --git a/test/cork.c b/test/cork.c
index 94f3bb0..6185e12 100644
--- a/test/cork.c
+++ b/test/cork.c
@@ -97,7 +97,7 @@ verify_old_dset_cork(void)
hsize_t dims[2] = {100, 20}; /* Dataset dimension sizes */
hsize_t max_dims[2] = {100, H5S_UNLIMITED}; /* Dataset maximum dimension sizes */
hsize_t chunk_dims[2] = {2, 5}; /* Dataset chunked dimension sizes */
- int **buf = NULL; /* Data bufer (pointers to fake 2D array) */
+ int **buf = NULL; /* Data buffer (pointers to fake 2D array) */
int *buf_data = NULL; /* Data buffer (actual data) */
int i = 0, j = 0; /* Local index variables */
H5O_info_t oinfo, oinfo2, oinfo3; /* Object metadata information */
@@ -509,7 +509,7 @@ verify_dset_cork(hbool_t swmr, hbool_t new_format)
hsize_t dims[2] = {100, 20}; /* Dataset dimension sizes */
hsize_t max_dims[2] = {100, H5S_UNLIMITED}; /* Dataset maximum dimension sizes */
hsize_t chunk_dims[2] = {2, 5}; /* Dataset chunked dimension sizes */
- int **buf = NULL; /* Data bufer (pointers to fake 2D array) */
+ int **buf = NULL; /* Data buffer (pointers to fake 2D array) */
int *buf_data = NULL; /* Data buffer (actual data) */
int i = 0, j = 0; /* Local index variables */
H5O_info_t oinfo, oinfo2, oinfo3; /* Object metadata information */
@@ -1883,7 +1883,7 @@ test_dset_cork(hbool_t swmr, hbool_t new_format)
{
hid_t fid = H5I_INVALID_HID; /* File ID */
hid_t fapl = H5I_INVALID_HID; /* File access property list */
- hid_t gid = H5I_INVALID_HID; /* Groupd ID */
+ hid_t gid = H5I_INVALID_HID; /* Group ID */
hid_t did1 = H5I_INVALID_HID, did2 = H5I_INVALID_HID; /* Dataset IDs */
hid_t tid1 = H5I_INVALID_HID, tid2 = H5I_INVALID_HID; /* Datatype IDs */
hid_t sid = H5I_INVALID_HID; /* Dataspace ID */
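The "cork" wording in these comments corresponds to the public metadata-flush calls on an object; a minimal sketch, assuming did is an already-open dataset and the burst of updates is elided:

#include "hdf5.h"

/* Pin ("cork") an object's metadata in the cache so it is not flushed,
 * then release it; a sketch only. */
static herr_t
cork_during_update(hid_t did)
{
    hbool_t corked = 0;

    if (H5Odisable_mdc_flushes(did) < 0) /* cork: metadata stays in the cache */
        return -1;
    if (H5Oare_mdc_flushes_disabled(did, &corked) < 0 || !corked)
        return -1;

    /* ... perform a burst of updates on the object here ... */

    return H5Oenable_mdc_flushes(did); /* uncork: normal eviction resumes */
}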
diff --git a/test/direct_chunk.c b/test/direct_chunk.c
index 1c4d6d7..3d233f6 100644
--- a/test/direct_chunk.c
+++ b/test/direct_chunk.c
@@ -686,7 +686,7 @@ filter_bogus1(unsigned int flags, size_t H5_ATTR_UNUSED cd_nelmts,
size_t buf_left = *buf_size; /* Amount of data buffer left to process */
if (flags & H5Z_FLAG_REVERSE) { /* read */
- /* Substract the "add on" value to all the data values */
+ /* Subtract the "add on" value from all the data values */
while (buf_left > 0) {
*int_ptr++ -= (int)ADD_ON;
buf_left -= sizeof(int);
@@ -722,7 +722,7 @@ filter_bogus2(unsigned int flags, size_t H5_ATTR_UNUSED cd_nelmts,
size_t buf_left = *buf_size; /* Amount of data buffer left to process */
if (flags & H5Z_FLAG_REVERSE) { /* read */
- /* Substract the "add on" value to all the data values */
+ /* Divide all the data values by the "factor" value */
while (buf_left > 0) {
*int_ptr++ /= (int)FACTOR;
buf_left -= sizeof(int);
@@ -1427,7 +1427,7 @@ error:
* Function: test_direct_chunk_read_no_cache
*
* Purpose: Test the basic functionality of H5Dread_chunk with the
- * chunk cache diabled.
+ * chunk cache disabled.
*
* Return: Success: 0
* Failure: 1
@@ -1963,7 +1963,7 @@ error:
* Function: test_read_unallocated_chunk
*
* Purpose: Tests the H5Dread_chunk and H5Dget_chunk_storage_size with valid
- * offets to chunks that have not been written to the dataset and are
+ * offsets to chunks that have not been written to the dataset and are
* not allocated in the chunk storage on disk.
*
* Return: Success: 0
@@ -2014,7 +2014,7 @@ test_read_unallocated_chunk(hid_t file)
if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
FAIL_STACK_ERROR;
- /* Write a single chunk to intialize the chunk storage */
+ /* Write a single chunk to initialize the chunk storage */
HDmemset(direct_buf, 0, CHUNK_NX * CHUNK_NY * sizeof(int));
offset[0] = 0;
offset[1] = 0;
@@ -2031,7 +2031,7 @@ test_read_unallocated_chunk(hid_t file)
offset[0] = i * CHUNK_NX;
offset[1] = j * CHUNK_NY;
- /* Read a non-existant chunk using the direct read function. */
+ /* Read a non-existent chunk using the direct read function. */
H5E_BEGIN_TRY
{
status = H5Dread_chunk(dataset, dxpl, offset, &filter_mask, &direct_buf);
@@ -2042,7 +2042,7 @@ test_read_unallocated_chunk(hid_t file)
if (status != -1)
TEST_ERROR
- /* Query the size of the non-existant chunk */
+ /* Query the size of the non-existent chunk */
direct_chunk_nbytes = ULONG_MAX;
H5E_BEGIN_TRY
{
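The unwritten-chunk probing pattern used by test_read_unallocated_chunk reduces to the sketch below. The 16x16 chunk size, the helper name, and the use of H5P_DEFAULT for the transfer property list are placeholders; H5E_BEGIN_TRY/H5E_END_TRY only suppress the expected error stack.

#include "hdf5.h"

#define SKETCH_NX 16
#define SKETCH_NY 16

/* Return 1 if the chunk at the given logical offset has no storage,
 * 0 if it does, -1 on unexpected errors; a sketch only. */
static int
chunk_is_unallocated(hid_t dset, hsize_t off_x, hsize_t off_y)
{
    hsize_t  offset[2] = {off_x, off_y};
    hsize_t  nbytes    = 0;
    uint32_t filters   = 0;
    int      buf[SKETCH_NX * SKETCH_NY]; /* must hold at least one chunk */
    herr_t   status    = -1;

    /* Storage size of an unwritten chunk is reported as 0 */
    if (H5Dget_chunk_storage_size(dset, offset, &nbytes) < 0)
        return -1;

    /* A direct read of an unwritten chunk is expected to fail */
    H5E_BEGIN_TRY
    {
        status = H5Dread_chunk(dset, H5P_DEFAULT, offset, &filters, buf);
    }
    H5E_END_TRY;

    return (nbytes == 0 && status < 0) ? 1 : 0;
}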
diff --git a/test/dsets.c b/test/dsets.c
index 1569f9a..330c7a0 100644
--- a/test/dsets.c
+++ b/test/dsets.c
@@ -1821,7 +1821,7 @@ filter_corrupt(unsigned int flags, size_t cd_nelmts, const unsigned int *cd_valu
TEST_ERROR
HDmemset(data, (int)value, (size_t)length);
- if (flags & H5Z_FLAG_REVERSE) { /* Varify data is actually corrupted during read */
+ if (flags & H5Z_FLAG_REVERSE) { /* Verify data is actually corrupted during read */
dst += offset;
if (HDmemcmp(data, dst, (size_t)length) != 0)
TEST_ERROR
@@ -2005,7 +2005,7 @@ test_filter_internal(hid_t fid, const char *name, hid_t dcpl, int if_fletcher32,
if (status >= 0)
TEST_ERROR;
- /* Callback decides to continue inspite data is corrupted. */
+ /* Callback decides to continue even though the data is corrupted. */
if (H5Pset_filter_callback(dxpl, filter_cb_cont, NULL) < 0)
TEST_ERROR;
if (H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check_data) < 0)
@@ -2072,7 +2072,7 @@ test_filter_internal(hid_t fid, const char *name, hid_t dcpl, int if_fletcher32,
if (status >= 0)
TEST_ERROR;
- /* Callback decides to continue inspite data is corrupted. */
+ /* Callback decides to continue even though the data is corrupted. */
if (H5Pset_filter_callback(dxpl, filter_cb_cont, NULL) < 0)
TEST_ERROR;
if (H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check_data) < 0)
@@ -2136,7 +2136,7 @@ test_filter_internal(hid_t fid, const char *name, hid_t dcpl, int if_fletcher32,
if (status >= 0)
TEST_ERROR;
- /* Callback decides to continue inspite data is corrupted. */
+ /* Callback decides to continue even though the data is corrupted. */
if (H5Pset_filter_callback(dxpl, filter_cb_cont, NULL) < 0)
TEST_ERROR;
if (H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check_data) < 0)
@@ -2203,7 +2203,7 @@ test_filter_internal(hid_t fid, const char *name, hid_t dcpl, int if_fletcher32,
if (status >= 0)
TEST_ERROR;
- /* Callback decides to continue inspite data is corrupted. */
+ /* Callback decides to continue even though the data is corrupted. */
if (H5Pset_filter_callback(dxpl, filter_cb_cont, NULL) < 0)
TEST_ERROR;
if (H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check_data) < 0)
@@ -3234,7 +3234,7 @@ test_nbit_int(hid_t file)
power = HDpow(2.0, (double)(precision - 1));
orig_data[i][j] = (int)(((long long)HDrandom() % (long long)power) << offset);
- /* even-numbered values are negtive */
+ /* even-numbered values are negative */
if ((i * size[1] + j + 1) % 2 == 0)
orig_data[i][j] = -orig_data[i][j];
}
@@ -3812,7 +3812,7 @@ test_nbit_compound(hid_t file)
orig_data[i][j].s = (short)(((long long)HDrandom() % (long long)power) << offset[2]);
orig_data[i][j].f = float_val[i][j];
- /* some even-numbered integer values are negtive */
+ /* some even-numbered integer values are negative */
if ((i * size[1] + j + 1) % 2 == 0) {
orig_data[i][j].i = -orig_data[i][j].i;
orig_data[i][j].s = (short)-orig_data[i][j].s;
@@ -4526,7 +4526,7 @@ test_nbit_int_size(hid_t file)
goto error;
}
- /* Initiliaze data buffer with random data within correct range
+ /* Initialize data buffer with random data within correct range
* corresponding to the memory datatype's precision and offset.
*/
for (i = 0; i < DSET_DIM1; i++)
@@ -4739,7 +4739,7 @@ test_nbit_flt_size(hid_t file)
} /* end if */
/*
- * Initiliaze data buffer with random data
+ * Initialize data buffer with random data
*/
for (i = 0; i < DSET_DIM1; i++)
for (j = 0; j < DSET_DIM2; j++)
@@ -4898,7 +4898,7 @@ test_scaleoffset_int(hid_t file)
for (j = 0; j < (size_t)size[1]; j++) {
orig_data[i][j] = HDrandom() % 10000;
- /* even-numbered values are negtive */
+ /* even-numbered values are negative */
if ((i * size[1] + j + 1) % 2 == 0)
orig_data[i][j] = -orig_data[i][j];
}
@@ -5042,7 +5042,7 @@ test_scaleoffset_int_2(hid_t file)
for (j = 0; j < (size_t)size[1]; j++) {
orig_data[0][j] = (int)HDrandom() % 10000;
- /* even-numbered values are negtive */
+ /* even-numbered values are negative */
if ((j + 1) % 2 == 0)
orig_data[0][j] = -orig_data[0][j];
}
@@ -5164,7 +5164,7 @@ test_scaleoffset_float(hid_t file)
for (j = 0; j < (size_t)size[1]; j++) {
orig_data[i][j] = (float)(HDrandom() % 100000) / 1000.0F;
- /* even-numbered values are negtive */
+ /* even-numbered values are negative */
if ((i * size[1] + j + 1) % 2 == 0)
orig_data[i][j] = -orig_data[i][j];
}
@@ -5310,7 +5310,7 @@ test_scaleoffset_float_2(hid_t file)
for (j = 0; j < (size_t)size[1]; j++) {
orig_data[0][j] = (float)(HDrandom() % 100000) / 1000.0F;
- /* even-numbered values are negtive */
+ /* even-numbered values are negative */
if ((j + 1) % 2 == 0)
orig_data[0][j] = -orig_data[0][j];
}
@@ -5431,7 +5431,7 @@ test_scaleoffset_double(hid_t file)
for (j = 0; j < (size_t)size[1]; j++) {
orig_data[i][j] = (float)(HDrandom() % 10000000) / 10000000.0F;
- /* even-numbered values are negtive */
+ /* even-numbered values are negative */
if ((i * size[1] + j + 1) % 2 == 0)
orig_data[i][j] = -orig_data[i][j];
}
@@ -5577,7 +5577,7 @@ test_scaleoffset_double_2(hid_t file)
for (j = 0; j < (size_t)size[1]; j++) {
orig_data[0][j] = (float)(HDrandom() % 10000000) / 10000000.0F;
- /* even-numbered values are negtive */
+ /* even-numbered values are negative */
if ((j + 1) % 2 == 0)
orig_data[0][j] = -orig_data[0][j];
}
@@ -7712,7 +7712,7 @@ test_missing_chunk(hid_t file)
if (H5Dread(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata2_bytes) < 0)
TEST_ERROR;
- /* Validata values read for the 1-D dataset */
+ /* Validate values read for the 1-D dataset */
for (u = 0; u < MISSING_CHUNK_DIM; u++) {
if ((u % 10) >= 5) {
if (rdata[u] != 911) {
@@ -7729,7 +7729,7 @@ test_missing_chunk(hid_t file)
} /* end else */
} /* end for */
- /* Validata values read for the 2-D dataset */
+ /* Validate values read for the 2-D dataset */
for (i = 0; i < MISSING_CHUNK_DIM; i++) {
for (j = 0; j < MISSING_CHUNK_DIM; j++) {
@@ -8835,7 +8835,7 @@ test_chunk_cache(hid_t fapl)
if ((dsid = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, dapl1)) < 0)
FAIL_STACK_ERROR
- /* Retrieve dapl from dataset, verfiy cache values are the same as on fapl_local */
+ /* Retrieve dapl from dataset, verify cache values are the same as on fapl_local */
if ((dapl2 = H5Dget_access_plist(dsid)) < 0)
FAIL_STACK_ERROR
if (H5Pget_chunk_cache(dapl2, &nslots_4, &nbytes_4, &w0_4) < 0)
@@ -8859,7 +8859,7 @@ test_chunk_cache(hid_t fapl)
if ((dsid = H5Oopen(fid, "dset", dapl1)) < 0)
FAIL_STACK_ERROR
- /* Retrieve dapl from dataset, verfiy cache values are the same as on dapl1 */
+ /* Retrieve dapl from dataset, verify cache values are the same as on dapl1 */
/* Note we rely on the knowledge that H5Pget_chunk_cache retrieves these
* values directly from the dataset structure, and not from a copy of the
* dapl used to open the dataset (which is not preserved).
@@ -8879,7 +8879,7 @@ test_chunk_cache(hid_t fapl)
if ((dsid = H5Dopen2(fid, "dset", H5P_DEFAULT)) < 0)
FAIL_STACK_ERROR
- /* Retrieve dapl from dataset, verfiy cache values are the same on fapl_local */
+ /* Retrieve dapl from dataset, verify cache values are the same as on fapl_local */
if ((dapl2 = H5Dget_access_plist(dsid)) < 0)
FAIL_STACK_ERROR
if (H5Pget_chunk_cache(dapl2, &nslots_4, &nbytes_4, &w0_4) < 0)
@@ -8889,7 +8889,7 @@ test_chunk_cache(hid_t fapl)
if (H5Pclose(dapl2) < 0)
FAIL_STACK_ERROR
- /* Similary, test use of H5Dcreate2 with H5P_DEFAULT */
+ /* Similarly, test use of H5Dcreate2 with H5P_DEFAULT */
if (H5Dclose(dsid) < 0)
FAIL_STACK_ERROR
if ((dsid = H5Dcreate2(fid, "dset2", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
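The three "verify cache values" hunks above are variations of one round trip; a condensed, hedged sketch (the dataset name "dset" and the 521-slot / 1 MiB / 0.75 values are placeholders):

#include "hdf5.h"

/* Set per-dataset chunk-cache parameters on a dapl, open the dataset with
 * it, then read the effective values back through the dataset's access
 * property list; a sketch only. */
static herr_t
check_chunk_cache_roundtrip(hid_t fid)
{
    hid_t  dapl  = H5Pcreate(H5P_DATASET_ACCESS);
    hid_t  dapl2 = -1;
    hid_t  dsid  = -1;
    size_t nslots;
    size_t nbytes;
    double w0;

    if (dapl < 0 || H5Pset_chunk_cache(dapl, 521, 1024 * 1024, 0.75) < 0)
        return -1;
    if ((dsid = H5Dopen2(fid, "dset", dapl)) < 0)
        return -1;

    /* The effective values come back through H5Dget_access_plist,
     * not from the dapl originally passed to H5Dopen2 */
    if ((dapl2 = H5Dget_access_plist(dsid)) < 0)
        return -1;
    if (H5Pget_chunk_cache(dapl2, &nslots, &nbytes, &w0) < 0)
        return -1;

    H5Pclose(dapl2);
    H5Pclose(dapl);
    H5Dclose(dsid);
    return (nslots == 521 && nbytes == 1024 * 1024) ? 0 : -1;
}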
@@ -11974,7 +11974,7 @@ error:
* Purpose: Checks that a file created with either:
* (a) SWMR-write + non-latest-format
* (b) write + latest format
- * will generate datset with latest chunk indexing type.
+ * will generate a dataset with the latest chunk indexing type.
*
* Return: Success: 0
* Failure: -1
@@ -14711,7 +14711,7 @@ test_versionbounds(void)
hid_t dcpl = -1; /* Dataset creation property list */
hid_t srcspace = -1; /* Source dataspaces */
hid_t vspace = -1; /* Virtual dset dataspaces */
- hid_t srcdset = -1; /* Source datset */
+ hid_t srcdset = -1; /* Source dataset */
hid_t vdset = -1; /* Virtual dataset */
hsize_t dims[1] = {3}; /* Data space current size */
char srcfilename[FILENAME_BUF_SIZE];
@@ -14847,7 +14847,7 @@ error:
/*-----------------------------------------------------------------------------
* Function: test_object_header_minimization_dcpl
*
- * Purpose: Test the "datset object header minimization" property as part of
+ * Purpose: Test the "dataset object header minimization" property as part of
* the DCPL.
*
* Return: Success/pass: 0
@@ -14889,15 +14889,13 @@ test_object_header_minimization_dcpl(void)
/* TESTS */
/*********/
- /* default value (not set explicitly)
- */
+ /* Default value (not set explicitly) */
if (H5Pget_dset_no_attrs_hint(dcpl_id, &minimize) == FAIL)
TEST_ERROR
if (FALSE != minimize)
TEST_ERROR
- /* FALSE-set value
- */
+ /* FALSE-set value */
if (H5Pset_dset_no_attrs_hint(dcpl_id, FALSE) == FAIL)
TEST_ERROR
if (H5Pget_dset_no_attrs_hint(dcpl_id, &minimize) == FAIL)
@@ -14905,8 +14903,7 @@ test_object_header_minimization_dcpl(void)
if (FALSE != minimize)
TEST_ERROR
- /* TRUE-set value
- */
+ /* TRUE-set value */
if (H5Pset_dset_no_attrs_hint(dcpl_id, TRUE) == FAIL)
TEST_ERROR
if (H5Pget_dset_no_attrs_hint(dcpl_id, &minimize) == FAIL)
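The default/FALSE/TRUE sequence above amounts to one setter and one getter on the dcpl; a minimal sketch (the helper name is illustrative):

#include "hdf5.h"

/* Request minimized dataset object headers on a dcpl and confirm the
 * hint was recorded; a sketch only. */
static herr_t
enable_minimized_headers(hid_t dcpl)
{
    hbool_t minimize = 0;

    if (H5Pset_dset_no_attrs_hint(dcpl, 1) < 0)
        return -1;
    if (H5Pget_dset_no_attrs_hint(dcpl, &minimize) < 0)
        return -1;

    /* Datasets created with this dcpl get the smallest possible object
     * header, on the assumption that no attributes will be added later */
    return minimize ? 0 : -1;
}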
@@ -15190,7 +15187,7 @@ main(void)
double rdcc_w0;
int nerrors = 0;
const char *envval;
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
int i;
/* Don't run this test using certain file drivers */
@@ -15198,7 +15195,7 @@ main(void)
if (envval == NULL)
envval = "nomatch";
- /* Current VFD that does not support contigous address space */
+ /* Current VFD that does not support contiguous address space */
contig_addr_vfd = (hbool_t)(HDstrcmp(envval, "split") != 0 && HDstrcmp(envval, "multi") != 0);
/* Set the random # seed */
diff --git a/test/dt_arith.c b/test/dt_arith.c
index f51ccd6..7f99649 100644
--- a/test/dt_arith.c
+++ b/test/dt_arith.c
@@ -27,7 +27,7 @@
#define FP_EPSILON 0.000001F
/*
- * Offset from alinged memory returned by malloc(). This can be used to test
+ * Offset from aligned memory returned by malloc(). This can be used to test
* that type conversions handle non-aligned buffers correctly.
*/
#define ALIGNMENT 1
@@ -225,7 +225,7 @@ static int without_hardware_g = 0;
/*The number of values if multiplied by 10 for each step.*/ \
num_norm = (SRC_MAX_10_EXP - SRC_MIN_10_EXP); \
/*Reduce the number of values by 2^factor. MULTIPLY=10^(2^factor). Using this algorithm \
- *instead of arithmatic operation to avoid any conversion*/ \
+ *instead of arithmetic operation to avoid any conversion*/ \
num_norm >>= factor; \
\
/*Total number of values*/ \
@@ -644,7 +644,7 @@ test_particular_fp_integer(void)
float src_f = (float)INT_MAX;
int dst_i;
int fill_value = 13;
- int endian; /*endianess */
+ int endian; /*endianness */
unsigned int fails_this_test = 0;
size_t j;
@@ -822,7 +822,7 @@ test_derived_flt(void)
size_t src_size, dst_size;
unsigned char *buf = NULL, *saved_buf = NULL;
int *aligned = NULL;
- int endian; /*endianess */
+ int endian; /*endianness */
size_t nelmts = NTESTELEM;
unsigned int fails_this_test = 0;
const size_t max_fails = 40; /*max number of failures*/
@@ -1271,7 +1271,7 @@ test_derived_integer(void)
char filename[1024];
size_t src_size, dst_size;
unsigned char *buf = NULL, *saved_buf = NULL;
- int endian; /*endianess */
+ int endian; /*endianness */
size_t nelmts = NTESTELEM;
unsigned int fails_this_test = 0;
const size_t max_fails = 40; /*max number of failures*/
@@ -1595,7 +1595,7 @@ test_conv_int_1(const char *name, hid_t src, hid_t dst)
dtype_t src_type, dst_type; /*data types */
const char *src_type_name = NULL; /*source type name */
const char *dst_type_name = NULL; /*destination type name */
- int endian; /*machine endianess */
+ int endian; /*machine endianness */
size_t src_size, dst_size; /*type sizes */
unsigned char *buf = NULL; /*buffer for conversion */
unsigned char *saved = NULL; /*original values */
@@ -2835,8 +2835,8 @@ test_conv_flt_1(const char *name, int run_test, hid_t src, hid_t dst)
int overflow = 0; /*overflow occurred */
int uflow = 0; /*underflow debug counters*/
size_t j, k; /*counters */
- int sendian; /* source type endianess */
- int dendian; /* Destination type endianess */
+ int sendian; /* source type endianness */
+ int dendian; /* Destination type endianness */
size_t dst_ebias; /* Destination type's exponent bias */
size_t src_epos; /* Source type's exponent position */
size_t src_esize; /* Source type's exponent size */
@@ -3153,7 +3153,7 @@ test_conv_flt_1(const char *name, int run_test, hid_t src, hid_t dst)
/*
* Assume same if both results are NaN. There are many NaN bit
- * patterns and the software doesn't attemt to emulate the
+ * patterns and the software doesn't attempt to emulate the
* hardware in this regard. Instead, software uses a single bit
* pattern for NaN by setting the significand to all ones.
*/
@@ -3432,8 +3432,8 @@ test_conv_int_fp(const char *name, int run_test, hid_t src, hid_t dst)
dtype_t dst_type; /*data types */
const char *src_type_name = NULL; /*source type name */
const char *dst_type_name = NULL; /*destination type name */
- int sendian; /*source endianess */
- int dendian; /*destination endianess */
+ int sendian; /*source endianness */
+ int dendian; /*destination endianness */
size_t src_size, dst_size; /*type sizes */
unsigned char *buf = NULL; /*buffer for conversion */
unsigned char *saved = NULL; /*original values */
@@ -3682,8 +3682,8 @@ test_conv_int_fp(const char *name, int run_test, hid_t src, hid_t dst)
}
/* Allocate and initialize the source buffer through macro INIT_INTEGER if the source is integer,
- * INIT_FP_NORM if floating-point. The BUF will be used for the conversion while the SAVED buffer will be
- * used for the comparison later.
+ * INIT_FP_NORM if floating-point. The BUF will be used for the conversion while the SAVED buffer
+ * will be used for the comparison later.
*/
if (src_type == INT_SCHAR) {
INIT_INTEGER(signed char, SCHAR_MAX, SCHAR_MIN, src_size, dst_size, src_nbits, buf, saved, nelmts);
diff --git a/test/dtypes.c b/test/dtypes.c
index 17853f0..6f753b4 100644
--- a/test/dtypes.c
+++ b/test/dtypes.c
@@ -29,7 +29,7 @@
#define ARRAY_DIM 4
/*
- * Offset from alinged memory returned by malloc(). This can be used to test
+ * Offset from aligned memory returned by malloc(). This can be used to test
* that type conversions handle non-aligned buffers correctly.
*/
#define ALIGNMENT 1
@@ -2639,7 +2639,7 @@ test_compound_13(void)
data_out.x[u] = (unsigned char)u;
data_out.y = 99.99f;
- /* Set latest_format in access propertly list to enable the latest
+ /* Set latest_format in access property list to enable the latest
* compound datatype format.
*/
if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
@@ -3924,7 +3924,7 @@ error:
*
* Modifications:
* Raymond Lu
- * Wednesday, Febuary 9, 2005
+ * Wednesday, February 9, 2005
* Added test for H5Tenum_valueof, H5Tenum_nameof, and
* H5Tget_member_value.
*-------------------------------------------------------------------------
@@ -6977,7 +6977,7 @@ test_set_order(void)
{
hid_t dtype; /* Datatype ID */
H5T_order_t order; /* Byte order */
- hsize_t dims[2] = {3, 4}; /* Array dimenstions */
+ hsize_t dims[2] = {3, 4}; /* Array dimensions */
herr_t ret; /* Generic return value */
TESTING("H5Tset/get_order");
@@ -7225,7 +7225,7 @@ test_set_order_compound(hid_t fapl)
hid_t file = -1;
hid_t cmpd = -1, memb_cmpd = -1, memb_array1 = -1, memb_array2 = -1, cmpd_array = -1;
hid_t vl_id = -1;
- hsize_t dims[2] = {3, 4}; /* Array dimenstions */
+ hsize_t dims[2] = {3, 4}; /* Array dimensions */
char filename[1024];
herr_t ret; /* Generic return value */
@@ -8080,7 +8080,7 @@ test_deprec(hid_t fapl)
unsigned u; /* Local index variable */
herr_t status; /* Generic routine value */
- TESTING("deprected API routines for datatypes");
+ TESTING("deprecated API routines for datatypes");
/* Create an array datatype with an atomic base type */
/* (dimension permutations allowed, but not stored) */
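Several of these comments deal with byte order; for reference, a small sketch of the H5Tset_order/H5Tget_order round trip that test_set_order builds on (copying H5T_NATIVE_INT is illustrative):

#include "hdf5.h"

/* Force a copy of the native int type to big-endian and read the order
 * back; a sketch only. */
static herr_t
demo_set_order(void)
{
    hid_t       dtype = H5Tcopy(H5T_NATIVE_INT);
    H5T_order_t order;

    if (dtype < 0)
        return -1;
    if (H5Tset_order(dtype, H5T_ORDER_BE) < 0) /* big-endian on-disk layout */
        return -1;

    order = H5Tget_order(dtype);
    H5Tclose(dtype);
    return (order == H5T_ORDER_BE) ? 0 : -1;
}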
diff --git a/test/earray.c b/test/earray.c
index fa9dd0a..40cce1b 100644
--- a/test/earray.c
+++ b/test/earray.c
@@ -1283,7 +1283,7 @@ typedef struct eiter_fw_t {
/*-------------------------------------------------------------------------
* Function: eiter_fw_init
*
- * Purpose: Initialize element interator (forward iteration)
+ * Purpose: Initialize element iterator (forward iteration)
*
* Return: Success: Pointer to iteration status object
* Failure: NULL
@@ -1427,7 +1427,7 @@ eiter_fw_state(void *in_eiter, const H5EA_create_t *cparam, const earray_test_pa
/*-------------------------------------------------------------------------
* Function: eiter_fw_term
*
- * Purpose: Shut down element interator (forward iteration)
+ * Purpose: Shut down element iterator (forward iteration)
*
* Return: Success: 0
* Failure: -1
@@ -1471,7 +1471,7 @@ typedef struct eiter_rv_t {
/*-------------------------------------------------------------------------
* Function: eiter_rv_init
*
- * Purpose: Initialize element interator (reverse iteration)
+ * Purpose: Initialize element iterator (reverse iteration)
*
* Return: Success: Pointer to iteration status object
* Failure: NULL
@@ -1652,7 +1652,7 @@ eiter_rv_state(void *in_eiter, const H5EA_create_t *cparam, const earray_test_pa
/*-------------------------------------------------------------------------
* Function: eiter_rv_term
*
- * Purpose: Shut down element interator (reverse iteration)
+ * Purpose: Shut down element iterator (reverse iteration)
*
* Return: Success: 0
* Failure: -1
@@ -1693,7 +1693,7 @@ typedef struct eiter_rnd_t {
/*-------------------------------------------------------------------------
* Function: eiter_rnd_init
*
- * Purpose: Initialize element interator (random iteration)
+ * Purpose: Initialize element iterator (random iteration)
*
* Return: Success: Pointer to iteration status object
* Failure: NULL
@@ -1802,7 +1802,7 @@ eiter_rnd_max(const void *in_eiter)
/*-------------------------------------------------------------------------
* Function: eiter_rnd_term
*
- * Purpose: Shut down element interator (random iteration)
+ * Purpose: Shut down element iterator (random iteration)
*
* Return: Success: 0
* Failure: -1
@@ -1842,7 +1842,7 @@ static const earray_iter_t ea_iter_rnd = {
/*-------------------------------------------------------------------------
* Function: eiter_rnd2_init
*
- * Purpose: Initialize element interator (random #2 iteration)
+ * Purpose: Initialize element iterator (random #2 iteration)
*
* Return: Success: Pointer to iteration status object
* Failure: NULL
@@ -1925,7 +1925,7 @@ typedef struct eiter_cyc_t {
/*-------------------------------------------------------------------------
* Function: eiter_cyc_init
*
- * Purpose: Initialize element interator (cyclic iteration)
+ * Purpose: Initialize element iterator (cyclic iteration)
*
* Return: Success: Pointer to iteration status object
* Failure: NULL
@@ -2018,7 +2018,7 @@ eiter_cyc_max(const void *in_eiter)
/*-------------------------------------------------------------------------
* Function: eiter_cyc_term
*
- * Purpose: Shut down element interator (cyclic iteration)
+ * Purpose: Shut down element iterator (cyclic iteration)
*
* Return: Success: 0
* Failure: -1
diff --git a/test/efc.c b/test/efc.c
index 3749e25..06881fb 100644
--- a/test/efc.c
+++ b/test/efc.c
@@ -1,4 +1,5 @@
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
* Copyright by the Board of Trustees of the University of Illinois. *
* All rights reserved. *
* *
diff --git a/test/enc_dec_plist_cross_platform.c b/test/enc_dec_plist_cross_platform.c
index 8fb6d0b..d843f98 100644
--- a/test/enc_dec_plist_cross_platform.c
+++ b/test/enc_dec_plist_cross_platform.c
@@ -28,7 +28,7 @@ int
main(void)
{
if (VERBOSE_MED)
- HDprintf("Encode/Decode property list endianess\n");
+ HDprintf("Encode/Decode property list endianness\n");
/******* ENCODE/DECODE DCPLS *****/
TESTING("Default DCPL Encoding/Decoding");
diff --git a/test/enum.c b/test/enum.c
index 3923892..5695a79 100644
--- a/test/enum.c
+++ b/test/enum.c
@@ -541,7 +541,7 @@ test_value_dsnt_exist(void)
hid_t datatype_id = (-1); /* identifiers */
int val;
- char nam[100];
+ char name[100];
size_t size = 100;
TESTING("for non-existing name and value");
/* Turn off error reporting since we expect failure in this test */
@@ -552,11 +552,11 @@ test_value_dsnt_exist(void)
if ((datatype_id = H5Tenum_create(H5T_NATIVE_INT)) < 0)
goto error;
- /* These calls should fail, since no memebrs exist yet */
+ /* These calls should fail, since no members exist yet */
if (H5Tenum_valueof(datatype_id, "SAX", &val) >= 0)
goto error;
val = 3;
- if (H5Tenum_nameof(datatype_id, &val, nam, size) >= 0)
+ if (H5Tenum_nameof(datatype_id, &val, name, size) >= 0)
goto error;
val = 2;
@@ -571,17 +571,17 @@ test_value_dsnt_exist(void)
/* This call should fail since we did not create a member with value = 3*/
val = 3;
- if (H5Tenum_nameof(datatype_id, &val, nam, size) >= 0)
+ if (H5Tenum_nameof(datatype_id, &val, name, size) >= 0)
goto error;
/* This call should fail since we did not create a member with value = 11*/
val = 11;
- if (H5Tenum_nameof(datatype_id, &val, nam, size) >= 0)
+ if (H5Tenum_nameof(datatype_id, &val, name, size) >= 0)
goto error;
/* This call should fail since we did not create a member with value = 0*/
val = 0;
- if (H5Tenum_nameof(datatype_id, &val, nam, size) >= 0)
+ if (H5Tenum_nameof(datatype_id, &val, name, size) >= 0)
goto error;
/* This call should fail since we do not have SAX name in the type */
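To make the expected failures above concrete, a hedged sketch of the enum round trip (the member name "GREEN" and its value are placeholders):

#include "hdf5.h"

/* Create a tiny enum type, then show that H5Tenum_nameof succeeds only
 * for values that were actually inserted; a sketch only. */
static herr_t
demo_enum_lookup(void)
{
    hid_t  etype = H5Tenum_create(H5T_NATIVE_INT);
    int    val;
    char   name[64];
    herr_t ret = -1;

    if (etype < 0)
        return -1;

    val = 2;
    if (H5Tenum_insert(etype, "GREEN", &val) < 0)
        goto done;

    /* Known value: succeeds and fills in "GREEN" */
    if (H5Tenum_nameof(etype, &val, name, sizeof(name)) < 0)
        goto done;

    /* Unknown value: expected to fail, so suppress the error stack */
    val = 3;
    H5E_BEGIN_TRY
    {
        ret = H5Tenum_nameof(etype, &val, name, sizeof(name));
    }
    H5E_END_TRY;
    ret = (ret < 0) ? 0 : -1; /* failure here is the correct outcome */

done:
    H5Tclose(etype);
    return ret;
}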
diff --git a/test/external.c b/test/external.c
index b3250c1..41a0eb8 100644
--- a/test/external.c
+++ b/test/external.c
@@ -1217,7 +1217,7 @@ test_path_relative_cwd(hid_t fapl)
}
H5E_END_TRY;
if (dset3 >= 0)
- FAIL_PUTS_ERROR("reopening the dataset with a different efile_prefix succeded");
+ FAIL_PUTS_ERROR("reopening the dataset with a different efile_prefix succeeded");
/* Read the entire dataset and compare with the original */
HDmemset(whole, 0, sizeof(whole));
@@ -1233,7 +1233,7 @@ test_path_relative_cwd(hid_t fapl)
if (H5Dclose(dset) < 0)
FAIL_STACK_ERROR
- /* Open dataset (use a differend prefix than for create.
+ /* Open dataset (use a different prefix than the one used for create).
* This works because the dataset was closed.
*/
if (H5Pset_efile_prefix(dapl2, "${ORIGIN}/.") < 0)
@@ -1254,7 +1254,7 @@ test_path_relative_cwd(hid_t fapl)
}
H5E_END_TRY;
if (dset3 >= 0)
- FAIL_PUTS_ERROR("reopening the dataset with a different efile_prefix succeded");
+ FAIL_PUTS_ERROR("reopening the dataset with a different efile_prefix succeeded");
/* Read the entire dataset and compare with the original */
HDmemset(whole, 0, sizeof(whole));
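A condensed sketch of the efile_prefix behavior discussed above: the prefix is a dataset access property, so a dataset that is already open keeps the prefix it was opened with. The function and dataset names are placeholders.

#include "hdf5.h"

/* Open a dataset whose raw data lives in an external file, resolving the
 * external file name relative to the directory of the HDF5 file itself;
 * a sketch only. */
static hid_t
open_with_origin_prefix(hid_t fid, const char *dset_name)
{
    hid_t dapl = H5Pcreate(H5P_DATASET_ACCESS);
    hid_t did  = -1;

    if (dapl < 0)
        return -1;

    /* "${ORIGIN}" expands to the directory containing the HDF5 file */
    if (H5Pset_efile_prefix(dapl, "${ORIGIN}") >= 0)
        did = H5Dopen2(fid, dset_name, dapl);

    H5Pclose(dapl);
    return did;
}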
diff --git a/test/farray.c b/test/farray.c
index 9f7e43f..ab04b68 100644
--- a/test/farray.c
+++ b/test/farray.c
@@ -968,7 +968,7 @@ typedef struct fiter_fw_t {
/*-------------------------------------------------------------------------
* Function: fiter_fw_init
*
- * Purpose: Initialize element interator (forward iteration)
+ * Purpose: Initialize element iterator (forward iteration)
*
* Return: Success: Pointer to iteration status object
* Failure: NULL
@@ -1020,7 +1020,7 @@ fiter_fw_next(void *_fiter)
/*-------------------------------------------------------------------------
* Function: fiter_term
*
- * Purpose: Shut down element interator (simple iterators)
+ * Purpose: Shut down element iterator (simple iterators)
*
* Return: Success: 0
* Failure: -1
@@ -1054,7 +1054,7 @@ typedef struct fiter_rv_t {
/*-------------------------------------------------------------------------
* Function: fiter_rv_init
*
- * Purpose: Initialize element interator (reverse iteration)
+ * Purpose: Initialize element iterator (reverse iteration)
*
* Return: Success: Pointer to iteration status object
* Failure: NULL
@@ -1119,7 +1119,7 @@ typedef struct fiter_rnd_t {
/*-------------------------------------------------------------------------
* Function: fiter_rnd_init
*
- * Purpose: Initialize element interator (random iteration)
+ * Purpose: Initialize element iterator (random iteration)
*
* Return: Success: Pointer to iteration status object
* Failure: NULL
@@ -1192,7 +1192,7 @@ fiter_rnd_next(void *_fiter)
/*-------------------------------------------------------------------------
* Function: fiter_rnd_term
*
- * Purpose: Shut down element interator (random iteration)
+ * Purpose: Shut down element iterator (random iteration)
*
* Return: Success: 0
* Failure: -1
@@ -1234,7 +1234,7 @@ typedef struct fiter_cyc_t {
/*-------------------------------------------------------------------------
* Function: fiter_cyc_init
*
- * Purpose: Initialize element interator (cyclic iteration)
+ * Purpose: Initialize element iterator (cyclic iteration)
*
* Return: Success: Pointer to iteration status object
* Failure: NULL
diff --git a/test/fheap.c b/test/fheap.c
index 8969ab4..c584cb5 100644
--- a/test/fheap.c
+++ b/test/fheap.c
@@ -2469,7 +2469,7 @@ test_id_limits(hid_t fapl, H5HF_create_t *cparam, hid_t fcpl)
size_t id_len; /* Size of fractal heap IDs */
size_t tiny_max_len; /* Max. length of tiny objects */
hbool_t tiny_len_extended; /* Do tiny objects use two bytes for the length? */
- hbool_t huge_ids_direct; /* Are 'huge' objects directly acccessed? */
+ hbool_t huge_ids_direct; /* Are 'huge' objects directly accessed? */
/* Set the filename to use for this test (dependent on fapl) */
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
@@ -13785,7 +13785,7 @@ test_filtered_huge(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tparam
fheap_heap_state_t state; /* State of fractal heap */
unsigned deflate_level; /* Deflation level */
size_t old_actual_id_len = 0; /* Old actual ID length */
- hbool_t huge_ids_direct; /* Are 'huge' objects directly acccessed? */
+ hbool_t huge_ids_direct; /* Are 'huge' objects directly accessed? */
/* Test description */
const char *base_desc = "insert 'huge' object into heap with I/O filters, then remove %s";
@@ -16352,7 +16352,7 @@ main(void)
unsigned num_pb_fs = 1; /* The number of settings to test for page buffering and file space handling */
int ExpressMode; /* Express testing level */
const char *envval; /* Environment variable */
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */
/* Don't run this test using certain file drivers */
@@ -16360,7 +16360,7 @@ main(void)
if (envval == NULL)
envval = "nomatch";
- /* Current VFD that does not support contigous address space */
+ /* Current VFD that does not support contiguous address space */
contig_addr_vfd = (hbool_t)(HDstrcmp(envval, "split") != 0 && HDstrcmp(envval, "multi") != 0);
/* Reset library */
diff --git a/test/file_image.c b/test/file_image.c
index 57f0ac6..5b794c8 100644
--- a/test/file_image.c
+++ b/test/file_image.c
@@ -145,7 +145,7 @@ test_properties(void)
/* Check that sizes are the same, and that the buffers are identical but separate */
VERIFY(temp_size == size, "Sizes of buffers don't match");
- VERIFY(temp2 != NULL, "Recieved buffer not set");
+ VERIFY(temp2 != NULL, "Received buffer not set");
VERIFY(temp2 != buffer, "Retrieved buffer is the same as original");
VERIFY(temp2 != temp, "Retrieved buffer is the same as previously retrieved buffer");
VERIFY(0 == HDmemcmp(temp2, buffer, size), "Buffers contain different data");
@@ -260,7 +260,7 @@ free_cb(void *ptr, H5FD_file_image_op_t op, void *udata)
* Function: udata_copy_cb
*
* Purpose: This function allows calls to the udata_copy callback to be tracked.
- * No copying actualy takes place; it is easier to deal with only one
+ * No copying actually takes place; it is easier to deal with only one
* instance of the udata.
*
* Returns: A pointer to the same udata that was passed in.
@@ -610,7 +610,7 @@ test_core(void)
VERIFY((udata->used_callbacks == MALLOC) || (udata->used_callbacks == (MALLOC | UDATA_COPY | UDATA_FREE)),
"opening a core file used the wrong callbacks");
VERIFY(udata->malloc_src == H5FD_FILE_IMAGE_OP_FILE_OPEN,
- "Malloc callback came from wrong sourc in core open");
+ "Malloc callback came from wrong source in core open");
/* Close file */
reset_udata(udata);
@@ -618,7 +618,7 @@ test_core(void)
VERIFY(ret >= 0, "H5Fclose failed");
VERIFY(udata->used_callbacks == FREE, "Closing a core file used the wrong callbacks");
VERIFY(udata->free_src == H5FD_FILE_IMAGE_OP_FILE_CLOSE,
- "Free callback came from wrong sourc in core close");
+ "Free callback came from wrong source in core close");
/* Reopen file */
file = H5Fopen(copied_filename, H5F_ACC_RDWR, fapl);
@@ -654,7 +654,7 @@ test_core(void)
VERIFY(ret >= 0, "H5Fclose failed");
VERIFY(udata->used_callbacks == (FREE), "Closing a core file used the wrong callbacks");
VERIFY(udata->free_src == H5FD_FILE_IMAGE_OP_FILE_CLOSE,
- "Free callback came from wrong sourc in core close");
+ "Free callback came from wrong source in core close");
/* Create file image buffer */
fd = HDopen(copied_filename, O_RDONLY);
@@ -954,7 +954,7 @@ test_get_file_image(const char *test_banner, const int file_name_num, hid_t fapl
err = H5Fclose(core_file_id);
VERIFY(err == SUCCEED, "H5Fclose(core_file_id) failed.");
- /* dicard core fapl */
+ /* discard core fapl */
err = H5Pclose(core_fapl_id);
VERIFY(err == SUCCEED, "H5Pclose(core_fapl_id) failed.");
@@ -1078,7 +1078,7 @@ test_get_file_image_error_rejection(void)
VERIFY(bytes_read < 0, "H5Fget_file_image(2 -- test 1) succeeded.");
/* Call H5Fget_file_image() with good buffer and buffer size,
- * but non-existant file_id. Should fail.
+ * but non-existent file_id. Should fail.
*/
H5E_BEGIN_TRY
{
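A hedged sketch of the H5Fget_file_image size-then-copy idiom these tests revolve around (the helper name is illustrative; the caller owns the returned buffer):

#include <stdlib.h>
#include "hdf5.h"

/* Copy an open HDF5 file into a freshly allocated memory buffer;
 * a sketch only. */
static void *
snapshot_file_image(hid_t fid, ssize_t *image_len)
{
    ssize_t len;
    void   *buf = NULL;

    /* First call with a NULL buffer returns the required size */
    if ((len = H5Fget_file_image(fid, NULL, 0)) < 0)
        return NULL;
    if ((buf = malloc((size_t)len)) == NULL)
        return NULL;

    /* Second call fills the buffer with the file image */
    if (H5Fget_file_image(fid, buf, (size_t)len) < 0) {
        free(buf);
        return NULL;
    }

    *image_len = len;
    return buf;
}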
diff --git a/test/fillval.c b/test/fillval.c
index be3ad14..a0ceac7 100644
--- a/test/fillval.c
+++ b/test/fillval.c
@@ -1918,7 +1918,7 @@ test_extend_cases(hid_t file, hid_t _dcpl, const char *dset_name, const hsize_t
if ((fspace = H5Dget_space(dset)) < 0)
TEST_ERROR
- /* Create dataspace for single element sized bufer */
+ /* Create dataspace for a single-element-sized buffer */
if ((mspace = H5Screate_simple(5, one, NULL)) < 0)
TEST_ERROR
@@ -2142,7 +2142,7 @@ test_extend(hid_t fapl, const char *base_name, H5D_layout_t layout)
#if 1
/*
- * Remove this when contiguous datasets can be exended to some
+ * Remove this when contiguous datasets can be extended to some
* predetermined fininte size, even if it's just in the slowest varying
* dimension. If it's removed prematurely then you'll get one of the
* errors described above or `unable to select fill value region'.
diff --git a/test/filter_fail.c b/test/filter_fail.c
index cd264ee..64a3df2 100644
--- a/test/filter_fail.c
+++ b/test/filter_fail.c
@@ -160,7 +160,7 @@ test_filter_write(char *file_name, hid_t my_fapl, hbool_t cache_enabled)
points[i] = i;
/* Write data. If the chunk cache is enabled, H5Dwrite should succeed. If it is
- * diabled, H5Dwrite should fail. */
+ * disabled, H5Dwrite should fail. */
if (cache_enabled) {
if (H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, sid, H5P_DEFAULT, points) < 0)
TEST_ERROR
@@ -186,7 +186,7 @@ test_filter_write(char *file_name, hid_t my_fapl, hbool_t cache_enabled)
TEST_ERROR
/* Close dataset. If the chunk cache is enabled, the flushing of chunks should fail
- * during H5Dclose. If it is diabled, H5Dwrite should fail but H5Dclose should succeed. */
+ * during H5Dclose. If it is disabled, H5Dwrite should fail but H5Dclose should succeed. */
if (cache_enabled) {
H5E_BEGIN_TRY
{
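One way to realize the cache_enabled / disabled split above is to zero out the raw-data chunk cache in the file access property list; a sketch under that assumption (the test may configure the cache differently):

#include "hdf5.h"

/* Return a fapl whose raw-data chunk cache is disabled (0 slots, 0 bytes),
 * so that filter failures surface at H5Dwrite rather than at flush time;
 * a sketch only.  The mdc_nelmts argument is ignored by current library
 * versions. */
static hid_t
make_fapl_without_chunk_cache(void)
{
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

    if (fapl < 0)
        return -1;
    if (H5Pset_cache(fapl, 0, 0, 0, 0.0) < 0) {
        H5Pclose(fapl);
        return -1;
    }
    return fapl;
}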
diff --git a/test/filter_plugin.c b/test/filter_plugin.c
index ea5c000..50572c5 100644
--- a/test/filter_plugin.c
+++ b/test/filter_plugin.c
@@ -2,7 +2,7 @@
* Copyright by The HDF Group. *
* All rights reserved. *
* *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
* the COPYING file, which can be found at the root of the source code *
* distribution tree, or in https://www.hdfgroup.org/licenses. *
diff --git a/test/filter_plugin1_dsets.c b/test/filter_plugin1_dsets.c
index b22ba24..3a9e714 100644
--- a/test/filter_plugin1_dsets.c
+++ b/test/filter_plugin1_dsets.c
@@ -2,7 +2,7 @@
* Copyright by The HDF Group. *
* All rights reserved. *
* *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
* the COPYING file, which can be found at the root of the source code *
* distribution tree, or in https://www.hdfgroup.org/licenses. *
@@ -79,7 +79,7 @@ add_sub_value(unsigned int flags, size_t cd_nelmts, const unsigned int *cd_value
value = (int)cd_values[0];
if (flags & H5Z_FLAG_REVERSE) {
- /* READ - Substract the given value from all the data values */
+ /* READ - Subtract the given value from all the data values */
while (buf_left > 0) {
*int_ptr++ -= value;
buf_left -= sizeof(int);
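A filter like the one above can also be registered directly, without the plugin machinery; a hedged sketch that assumes a callback with the same H5Z_func_t signature is visible under the name add_sub_value (the filter ID 256 and the name string are placeholders in the user-defined range):

#include "hdf5.h"

/* Assumed to match the H5Z_func_t callback shown in the hunk above */
size_t add_sub_value(unsigned int flags, size_t cd_nelmts, const unsigned int *cd_values, size_t nbytes,
                     size_t *buf_size, void **buf);

#define SKETCH_FILTER_ID 256 /* placeholder ID in the user-defined range */

static const H5Z_class2_t SKETCH_FILTER_CLS[1] = {{
    H5Z_CLASS_T_VERS,               /* H5Z_class_t version           */
    (H5Z_filter_t)SKETCH_FILTER_ID, /* filter id number              */
    1, 1,                           /* encoder / decoder present     */
    "add/sub value sketch",         /* filter name for debugging     */
    NULL, NULL,                     /* can_apply / set_local unused  */
    add_sub_value,                  /* the actual filter callback    */
}};

/* Register the filter and attach it to a chunked dcpl with one parameter
 * (the value to add on write and subtract on read); a sketch only. */
static herr_t
attach_add_sub_filter(hid_t dcpl, unsigned value)
{
    if (H5Zregister(SKETCH_FILTER_CLS) < 0)
        return -1;
    return H5Pset_filter(dcpl, (H5Z_filter_t)SKETCH_FILTER_ID, H5Z_FLAG_MANDATORY, 1, &value);
}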
diff --git a/test/filter_plugin2_dsets.c b/test/filter_plugin2_dsets.c
index fed8176..6e8c1e1 100644
--- a/test/filter_plugin2_dsets.c
+++ b/test/filter_plugin2_dsets.c
@@ -2,7 +2,7 @@
* Copyright by The HDF Group. *
* All rights reserved. *
* *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
* the COPYING file, which can be found at the root of the source code *
* distribution tree, or in https://www.hdfgroup.org/licenses. *
diff --git a/test/filter_plugin3_dsets.c b/test/filter_plugin3_dsets.c
index e8103a3..c901355 100644
--- a/test/filter_plugin3_dsets.c
+++ b/test/filter_plugin3_dsets.c
@@ -2,7 +2,7 @@
* Copyright by The HDF Group. *
* All rights reserved. *
* *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
* the COPYING file, which can be found at the root of the source code *
* distribution tree, or in https://www.hdfgroup.org/licenses. *
@@ -108,7 +108,7 @@ add_sub_value_hdf5(unsigned int flags, size_t cd_nelmts, const unsigned int *cd_
value = (int)cd_values[0];
if (flags & H5Z_FLAG_REVERSE) {
- /* READ - Substract the given value from all the data values */
+ /* READ - Subtract the given value from all the data values */
while (buf_left > 0) {
*int_ptr++ -= value;
buf_left -= sizeof(int);
diff --git a/test/filter_plugin4_groups.c b/test/filter_plugin4_groups.c
index 97cb59a..7e576a3 100644
--- a/test/filter_plugin4_groups.c
+++ b/test/filter_plugin4_groups.c
@@ -2,7 +2,7 @@
* Copyright by The HDF Group. *
* All rights reserved. *
* *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
* the COPYING file, which can be found at the root of the source code *
* distribution tree, or in https://www.hdfgroup.org/licenses. *
diff --git a/test/flushrefresh.c b/test/flushrefresh.c
index 0b894ef..08f2e9c 100644
--- a/test/flushrefresh.c
+++ b/test/flushrefresh.c
@@ -63,10 +63,10 @@
#define NOT_FLUSHED "NOT_FLUSHED"
/* Error Handling */
-/* For errors occuring in the main process, use the standard TEST_ERROR macro.
+/* For errors occurring in the main process, use the standard TEST_ERROR macro.
For errors occurring in the spawned process (from the test script), use
the PROCESS_ERROR macro, which will send a signal to the main process so the
- main process can propogate errors correctly. */
+ main process can propagate errors correctly. */
FILE *errorfile;
#define ERRFILE "flushrefresh_ERROR"
#define PROCESS_ERROR \
@@ -222,7 +222,7 @@ test_flush(void)
* object, a series of verifications will occur on each object in the file.
*
* Each verification consists of spawning off a new process and determining
- * if the object can be opened and its information retreived in said
+ * if the object can be opened and its information retrieved in said
* alternate process. It reports the results, which are compared to an
* expected value (either that the object can be found on disk, or that it
* cannot).
@@ -695,7 +695,7 @@ test_refresh(void)
* that an attribute has been added, and is only visible after a
* successful call to a H5*refresh function).
*
- * As with the flush case, the implemention is a bit tricky as it's
+ * As with the flush case, the implementation is a bit tricky as it's
* dealing with signals going back and forth between the two processes
* to ensure the timing is correct, but basically, an example:
*
@@ -1166,7 +1166,7 @@ error:
* Purpose: This function is used to communicate with the verification
* process spawned by the start_refresh_verification_process
* function. It gives it the go-ahead to call H5*refresh
- * on an object and conlcude the refresh verification.
+ * on an object and conclude the refresh verification.
*
* Return: 0 on Success, 1 on Failure
*
@@ -1360,7 +1360,7 @@ error:
*
* Purpose: This function checks the status of external verification
* processes to see if they've succeeded. It checks for the
- * existance of flushrefresh_ERROR file. If present, that indicates
+ * existence of flushrefresh_ERROR file. If present, that indicates
* an external verification process has failed, and this function
* thus fails as well. If not present, then nothing else has
* failed, and this function succeeds.
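At the API level, the handshake that this file orchestrates with signal files comes down to one call on each side; a minimal sketch (SWMR file setup is omitted):

#include "hdf5.h"

/* Writer side: push the dataset's pending metadata changes to the file. */
static herr_t
writer_publish(hid_t did)
{
    return H5Dflush(did);
}

/* Reader side: evict the dataset's cached metadata and re-read it, making
 * newly added attributes or extended dimensions visible. */
static herr_t
reader_catch_up(hid_t did)
{
    return H5Drefresh(did);
}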
diff --git a/test/gen_new_array.c b/test/gen_new_array.c
index 5811106..8161c4e 100644
--- a/test/gen_new_array.c
+++ b/test/gen_new_array.c
@@ -89,7 +89,7 @@ main(void)
if (ret < 0)
printf("field 1 insert<0!\n");
- /* Creat the array datatype */
+ /* Create the array datatype */
arr_type = H5Tarray_create2(H5T_NATIVE_FLOAT, ARRAY1_RANK, tdims1);
if (arr_type < 0)
printf("arr_type<0!\n");
@@ -104,7 +104,7 @@ main(void)
if (ret < 0)
printf("field 3 array close<0!\n");
- /* Creat the array datatype */
+ /* Create the array datatype */
arr_type = H5Tarray_create2(H5T_NATIVE_LONG, ARRAY1_RANK, tdims1);
if (arr_type < 0)
printf("arr_type<0!\n");
diff --git a/test/gen_old_group.c b/test/gen_old_group.c
index 1b4e0b2..15e67a4 100644
--- a/test/gen_old_group.c
+++ b/test/gen_old_group.c
@@ -19,7 +19,7 @@
* empty "symbol table" group.
*
* This file is used in the v1.7 branch (after the "compact group"
- * checkin) to test compatability. Compile and run this
+ * checkin) to test compatibility. Compile and run this
* program (with the 1.6.x branch), it will generate a file*
* called "group_old.h5". You need to move it to the test
* directory in the HDF5 v1.7 source tree. The test/stab.c
diff --git a/test/gen_plist.c b/test/gen_plist.c
index 8e5e0f1..4840f52 100644
--- a/test/gen_plist.c
+++ b/test/gen_plist.c
@@ -81,7 +81,7 @@ main(void)
H5AC_cache_image_config_t my_cache_image_config = {H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION, TRUE, FALSE,
-1};
- /* check endianess */
+ /* check endianness */
{
short int word = 0x0001;
char *byte = (char *)&word;
diff --git a/test/genall5.c b/test/genall5.c
index 948e3f9..46d6bb0 100644
--- a/test/genall5.c
+++ b/test/genall5.c
@@ -14,7 +14,7 @@
/* Programmer: John Mainzer
* 9/23/15
*
- * This file contains a heavily edited and functionaly reduce
+ * This file contains a heavily edited and functionally reduced
* version of the test code first written by Quincey in a file
* of the same name.
*/
diff --git a/test/getname.c b/test/getname.c
index 30ab98d..b8b88c1 100644
--- a/test/getname.c
+++ b/test/getname.c
@@ -1526,7 +1526,7 @@ test_main(hid_t file_id, hid_t fapl)
{
char name[NAME_BUF_SIZE]; /* Buffer to hold name and its size */
- /* Get name for non commited datatype, it should fail */
+ /* Get name for non-committed datatype; it should fail */
H5E_BEGIN_TRY
{
if (H5Iget_name(type_id, name, NAME_BUF_SIZE) > 0)
diff --git a/test/gheap.c b/test/gheap.c
index 71b3bd1..7eb99d2 100644
--- a/test/gheap.c
+++ b/test/gheap.c
@@ -352,7 +352,7 @@ error:
* Function: test_4
*
* Purpose: Tests the H5HG_remove() feature by writing lots of objects
- * and occassionally removing some. When we're done they're all
+ * and occasionally removing some. When we're done they're all
* removed.
*
* Return: Success: 0
diff --git a/test/h5test.c b/test/h5test.c
index cdd9da8..6f89f5b 100644
--- a/test/h5test.c
+++ b/test/h5test.c
@@ -1406,7 +1406,7 @@ h5_szip_can_encode(void)
* further calls to getenv_all take place.
*
* Return: pointer to a string containing the value of the environment variable
- * NULL if the varialbe doesn't exist in task 'root's environment.
+ * NULL if the variable doesn't exist in task 'root's environment.
*
* Programmer: Leon Arber
* 4/4/05
@@ -1701,7 +1701,7 @@ h5_send_message(const char *send, const char *arg1, const char *arg2)
* signal file from disk, and only continues once it has
* successfully done so (i.e., only after another process has
* called the "h5_send_message" function to write the signal file).
- * This functon will then immediately remove the file (i.e.,
+ * This function will then immediately remove the file (i.e.,
* to indicate that it has been received and can be reused),
* and then exits, allowing the calling function to continue.
*
diff --git a/test/hdfs.c b/test/hdfs.c
index f1c33c4..0bb1aa0 100644
--- a/test/hdfs.c
+++ b/test/hdfs.c
@@ -52,7 +52,7 @@
* 4) Configurable expected-actual order in generated comparison strings.
* Some prefer `VERIFY(expected, actual)`, others
* `VERIFY(actual, expected)`. Provide preprocessor ifdef switch
- * to satifsy both parties, assuming one paradigm per test file.
+ * to satisfy both parties, assuming one paradigm per test file.
* (One could #undef and redefine the flag through the file as desired,
* but _why_.)
*
diff --git a/test/links.c b/test/links.c
index e6e385e..7f52ecf 100644
--- a/test/links.c
+++ b/test/links.c
@@ -3206,7 +3206,7 @@ external_link_dangling(hid_t fapl, hbool_t new_format)
if (status >= 0) {
H5_FAILED();
HDputs(
- " Retreiving name of object by index through dangling file external link should have failed.");
+ " Retrieving name of object by index through dangling file external link should have failed.");
} /* end if */
/* Close root group */
@@ -3679,7 +3679,7 @@ external_link_abstar(hid_t fapl, hbool_t new_format)
}
H5E_END_TRY;
- /* should be able to find the target file with abolute path */
+ /* should be able to find the target file with absolute path */
if (gid < 0) {
H5_FAILED();
HDputs(" Should have found the file in tmp_links directory.");
@@ -11572,7 +11572,7 @@ link_filters(hid_t fapl, hbool_t new_format)
TEST_ERROR
filesize_unfiltered = h5_get_file_size(filename, fapl);
- /* Set deflate fitler */
+ /* Set deflate filter */
if (H5Pset_deflate(fcpl, 6) < 0)
TEST_ERROR
@@ -17256,7 +17256,7 @@ main(void)
for (minimize_dset_oh = 0; minimize_dset_oh <= 1; minimize_dset_oh++) {
if (minimize_dset_oh) {
- HDprintf("\n-Testing with minimzed dataset object headers-\n");
+ HDprintf("\n-Testing with minimized dataset object headers-\n");
dcpl_g = H5Pcreate(H5P_DATASET_CREATE);
if (0 > dcpl_g)
TEST_ERROR
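A sketch of the dangling-external-link case above: creating the link does not check that the target file exists, so the failure only shows up when the link is traversed. The file, object, and link names are placeholders.

#include "hdf5.h"

/* Create an external link to a missing file, then show that traversal
 * (not creation) is what fails; a sketch only. */
static herr_t
demo_dangling_external_link(hid_t fid)
{
    hid_t  gid = -1;
    herr_t ret;

    /* Creation succeeds even though "no_such_file.h5" is absent */
    if (H5Lcreate_external("no_such_file.h5", "/target", fid, "dangle", H5P_DEFAULT, H5P_DEFAULT) < 0)
        return -1;

    /* Traversal is expected to fail; suppress the error stack */
    H5E_BEGIN_TRY
    {
        gid = H5Gopen2(fid, "dangle", H5P_DEFAULT);
    }
    H5E_END_TRY;

    ret = (gid < 0) ? 0 : -1; /* opening through the dangling link must fail */
    if (gid >= 0)
        H5Gclose(gid);
    return ret;
}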
diff --git a/test/mf.c b/test/mf.c
index 70746f7..18bf854 100644
--- a/test/mf.c
+++ b/test/mf.c
@@ -222,7 +222,7 @@ test_mf_eoa(const char *env_h5_drvr, hid_t fapl)
haddr_t addr1, addr2;
haddr_t ma_addr = HADDR_UNDEF, new_ma_addr = HADDR_UNDEF;
hsize_t ma_size = 0;
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
TESTING("H5MM_alloc() of file allocation");
@@ -370,7 +370,7 @@ test_mf_eoa_shrink(const char *env_h5_drvr, hid_t fapl)
haddr_t addr = 0;
haddr_t ma_addr = HADDR_UNDEF, new_ma_addr = HADDR_UNDEF;
hsize_t ma_size = 0, new_ma_size = 0;
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
TESTING("H5MF_try_shrink() of file allocation: test 1");
@@ -657,7 +657,7 @@ test_mf_eoa_extend(const char *env_h5_drvr, hid_t fapl)
htri_t was_extended;
haddr_t ma_addr = HADDR_UNDEF, new_ma_addr = HADDR_UNDEF;
hsize_t ma_size = 0, new_ma_size = 0;
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
TESTING("H5MF_try_extend() of file allocation: test 1");
@@ -1933,7 +1933,7 @@ test_mf_fs_extend(hid_t fapl)
if (check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
- /* Remove section A from free-space manger */
+ /* Remove section A from free-space manager */
if (H5MF__find_sect(f, H5FD_MEM_SUPER, (hsize_t)(TBLOCK_SIZE30 - 10), f->shared->fs_man[H5FD_MEM_SUPER],
&tmp) != TRUE)
TEST_ERROR
@@ -2011,7 +2011,7 @@ test_mf_fs_absorb(const char *env_h5_drvr, hid_t fapl)
haddr_t ma_addr = HADDR_UNDEF;
hsize_t ma_size = 0;
H5MF_free_section_t *sect_node = NULL;
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
TESTING("A free-space section absorbs an aggregator: test 1");
@@ -2187,7 +2187,7 @@ test_mf_aggr_alloc1(const char *env_h5_drvr, hid_t fapl)
haddr_t addr1, addr2;
haddr_t ma_addr = HADDR_UNDEF;
hsize_t ma_size = 0;
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
TESTING("H5MF_alloc() of meta/sdata aggregator:test 1");
@@ -2338,7 +2338,7 @@ test_mf_aggr_alloc2(const char *env_h5_drvr, hid_t fapl)
haddr_t addr1, addr2, addr3;
haddr_t ma_addr = HADDR_UNDEF;
hsize_t ma_size = 0;
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
TESTING("H5MF_alloc() of meta/sdata aggregator:test 2");
@@ -2496,7 +2496,7 @@ test_mf_aggr_alloc3(const char *env_h5_drvr, hid_t fapl)
hsize_t ma_size = 0, new_ma_size = 0;
haddr_t sdata_addr = HADDR_UNDEF;
hsize_t sdata_size = 0;
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
TESTING("H5MF_alloc() of meta/sdata aggregator: test 3");
@@ -2664,7 +2664,7 @@ test_mf_aggr_alloc4(const char *env_h5_drvr, hid_t fapl)
haddr_t addr1, addr2, saddr1, saddr2, saddr3;
haddr_t ma_addr = HADDR_UNDEF, new_ma_addr = HADDR_UNDEF, sdata_addr = HADDR_UNDEF;
hsize_t ma_size = 0, new_ma_size = 0, sdata_size = 0;
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
TESTING("H5MF_alloc() of meta/sdata aggregator:test 4");
@@ -2813,7 +2813,7 @@ test_mf_aggr_alloc5(const char *env_h5_drvr, hid_t fapl)
haddr_t addr1, addr2, addr3;
haddr_t ma_addr = HADDR_UNDEF, new_ma_addr = HADDR_UNDEF;
hsize_t ma_size = 0, new_ma_size = 0;
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
TESTING("H5MF_alloc() of meta/sdata aggregator:test 5");
@@ -2951,7 +2951,7 @@ test_mf_aggr_alloc6(const char *env_h5_drvr, hid_t fapl)
haddr_t ma_addr = HADDR_UNDEF, new_ma_addr = HADDR_UNDEF, sdata_addr = HADDR_UNDEF;
hsize_t ma_size = 0, new_ma_size = 0, sdata_size = 0;
H5FS_stat_t state;
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
TESTING("H5MF_alloc() of meta/sdata aggregator:test 6");
@@ -3125,7 +3125,7 @@ test_mf_aggr_alloc7(const char *env_h5_drvr, hid_t fapl)
haddr_t ma_addr = HADDR_UNDEF, sdata_addr = HADDR_UNDEF;
hsize_t ma_size = 0, sdata_size = 0;
H5FS_stat_t state;
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
TESTING("H5MF_alloc() of meta/sdata aggregator:test 7");
@@ -3296,7 +3296,7 @@ test_mf_aggr_extend(const char *env_h5_drvr, hid_t fapl)
haddr_t ma_addr = HADDR_UNDEF, new_ma_addr = HADDR_UNDEF, sdata_addr = HADDR_UNDEF;
hsize_t ma_size = 0, new_ma_size = 0, sdata_size = 0;
htri_t was_extended;
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
TESTING("H5MF_try_extend() of meta/sdata aggregator: test 1");
@@ -3594,7 +3594,7 @@ test_mf_aggr_absorb(const char *env_h5_drvr, hid_t fapl)
haddr_t new_sdata_addr = HADDR_UNDEF;
hsize_t ma_size = 0, new_ma_size = 0;
hsize_t sdata_size = 0, new_sdata_size = 0;
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
TESTING("H5MF_try_shrink() of meta/sdata aggregator: test 1");
@@ -5837,7 +5837,7 @@ error:
*
* Alignment = 4096 aggr->alloc_size = 2048
*
- * Allocate first block (30) from meta_aggr: (meta_aggr is emtpy)
+ * Allocate first block (30) from meta_aggr: (meta_aggr is empty)
* request-size is > meta_aggr->size and < meta_aggr->alloc_size
* Result:
* A block of meta_aggr->alloc_size is allocated from file allocation
@@ -7113,7 +7113,7 @@ test_mf_fs_gone(const char *env_h5_drvr, hid_t fapl, hbool_t new_format)
else
TESTING("File's free-space is going away with old library format")
- /* Current VFD that does not support contigous address space */
+ /* Current VFD that does not support contiguous address space */
contig_addr_vfd = (hbool_t)(HDstrcmp(env_h5_drvr, "split") != 0 && HDstrcmp(env_h5_drvr, "multi") != 0);
if (contig_addr_vfd) {
@@ -7314,7 +7314,7 @@ test_mf_strat_thres_persist(const char *env_h5_drvr, hid_t fapl, hbool_t new_for
else
TESTING("File space strategy/persisting/threshold with old library format")
- /* Current VFD that does not support contigous address space */
+ /* Current VFD that does not support contiguous address space */
contig_addr_vfd = (hbool_t)(HDstrcmp(env_h5_drvr, "split") != 0 && HDstrcmp(env_h5_drvr, "multi") != 0);
/* Set the filename to use for this test (dependent on fapl) */
@@ -7484,7 +7484,7 @@ test_mf_strat_thres_gone(const char *env_h5_drvr, hid_t fapl, hbool_t new_format
else
TESTING("File space merge/shrink for section size < threshold with old library format")
- /* Current VFD that does not support contigous address space */
+ /* Current VFD that does not support contiguous address space */
contig_addr_vfd = (hbool_t)(HDstrcmp(env_h5_drvr, "split") != 0 && HDstrcmp(env_h5_drvr, "multi") != 0);
/* Set the filename to use for this test (dependent on fapl) */
@@ -8045,7 +8045,7 @@ test_page_try_shrink(const char *env_h5_drvr, hid_t fapl)
haddr_t addr1; /* Address for small metadata block */
haddr_t saddr1; /* Address for small raw data block */
haddr_t gaddr1; /* Address for large data block */
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
htri_t status; /* status from shrinking */
h5_stat_size_t file_size; /* File size */
char filename[FILENAME_LEN]; /* Filename to use */
@@ -8175,7 +8175,7 @@ test_page_small_try_extend(const char *env_h5_drvr, hid_t fapl)
H5F_t *f = NULL; /* Internal file object pointer */
haddr_t addr1, addr2, addr3; /* Addresses for small metadata blocks */
haddr_t saddr1; /* Address for small raw data block */
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
htri_t was_extended; /* Whether the block can be extended or not */
char filename[FILENAME_LEN]; /* Filename to use */
@@ -8354,7 +8354,7 @@ test_page_large_try_extend(const char *env_h5_drvr, hid_t fapl)
hid_t fcpl = -1; /* File creation property list */
H5F_t *f = NULL; /* Internal file object pointer */
haddr_t gaddr1, gaddr2, gaddr3, gaddr4; /* Addresses for large data blocks */
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
htri_t was_extended; /* Whether the block can be extended or not */
char filename[FILENAME_LEN]; /* Filename to use */
@@ -8517,7 +8517,7 @@ test_page_large(const char *env_h5_drvr, hid_t fapl)
hid_t fcpl = -1; /* File creation property list ID */
H5F_t *f = NULL; /* Internal file object pointer */
haddr_t gaddr1, gaddr2, gaddr3, gaddr4; /* Addresses for blocks */
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
H5FS_stat_t fs_stat; /* Information for free-space manager */
h5_stat_size_t file_size; /* File size */
char filename[FILENAME_LEN]; /* Filename to use */
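
Many of the mf.c hunks above revise the same comment on `contig_addr_vfd`, so a minimal standalone sketch of that check may help. It assumes only the HDF5_DRIVER environment-variable convention visible in the hunks, and uses plain libc calls in place of the library's HDgetenv/HDstrcmp wrappers.

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* The "split" and "multi" drivers map each memory type to its own file,
 * so they do not provide a single contiguous address space; anything
 * else is treated as contiguous, matching the checks in the hunks above. */
static bool vfd_has_contiguous_address_space(void)
{
    const char *drv = getenv("HDF5_DRIVER");

    if (drv == NULL)
        drv = "nomatch";

    return (strcmp(drv, "split") != 0) && (strcmp(drv, "multi") != 0);
}
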
diff --git a/test/mirror_vfd.c b/test/mirror_vfd.c
index 7d838c6..c6e9ef0 100644
--- a/test/mirror_vfd.c
+++ b/test/mirror_vfd.c
@@ -1496,7 +1496,7 @@ create_datasets(hid_t file_id, unsigned min_dset, unsigned max_dset)
}
}
- /* select on disk hyperslab */
+ /* Select on disk hyperslab */
offset[0] = (hsize_t)i;
offset[1] = (hsize_t)j;
LOGPRINT(3, " H5Sselect_hyperslab()\n");
diff --git a/test/mount.c b/test/mount.c
index 1841889..53379b6 100644
--- a/test/mount.c
+++ b/test/mount.c
@@ -726,7 +726,7 @@ test_move(hid_t fapl)
H5E_END_TRY;
if (status >= 0) {
H5_FAILED();
- HDputs(" Moving an object across files should't have been possible");
+ HDputs(" Moving an object across files shouldn't have been possible");
TEST_ERROR
} /* end if */
@@ -1808,7 +1808,7 @@ test_mount_after_unmount(hid_t fapl)
if (HDstrcmp(objname, "/X/M/Y") != 0)
TEST_ERROR
- /* Rename object in file #3 that is "disconnected" from name hiearchy */
+ /* Rename object in file #3 that is "disconnected" from name hierarchy */
/* (It is "disconnected" because it's parent file has been unmounted) */
if (H5Lmove(gidAMX, "M/Y", gidAMX, "M/Z", H5P_DEFAULT, H5P_DEFAULT) < 0)
TEST_ERROR
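
For context on the test_move hunk above, here is a minimal sketch (with hypothetical group names) of the cross-file move that is expected to fail; it assumes two open file identifiers and silences the expected error with the public H5E_BEGIN_TRY/H5E_END_TRY macros.

#include "hdf5.h"

/* Attempt to move a link from one open file into another; H5Lmove()
 * cannot move links across files, so a negative return is expected. */
static herr_t try_cross_file_move(hid_t file1, hid_t file2)
{
    herr_t status;

    H5E_BEGIN_TRY
    {
        status = H5Lmove(file1, "group_in_file1", file2, "group_in_file2",
                         H5P_DEFAULT, H5P_DEFAULT);
    }
    H5E_END_TRY;

    return status; /* a negative value is the expected outcome */
}
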
diff --git a/test/mtime.c b/test/mtime.c
index 255ab5d..3e51185 100644
--- a/test/mtime.c
+++ b/test/mtime.c
@@ -113,7 +113,7 @@ main(void)
if (0 == oi1.ctime) {
SKIPPED();
HDputs(" The modification time could not be decoded on this OS.");
- HDputs(" Modification times will be mantained in the file but");
+ HDputs(" Modification times will be maintained in the file but");
HDputs(" cannot be queried on this system. See H5O_mtime_decode().");
return 0;
}
diff --git a/test/objcopy.c b/test/objcopy.c
index 9b055b6..aacc4a0 100644
--- a/test/objcopy.c
+++ b/test/objcopy.c
@@ -2750,7 +2750,7 @@ test_copy_dataset_simple_empty(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, h
char dst_filename[NAME_BUF_SIZE];
if (test_open) {
- TESTING("H5Ocopy(): empty and openend contiguous dataset");
+ TESTING("H5Ocopy(): empty and opened contiguous dataset");
}
else {
TESTING("H5Ocopy(): empty contiguous dataset");
@@ -3953,7 +3953,7 @@ test_copy_dataset_chunked_empty(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
if ((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED3, H5P_DEFAULT)) < 0)
TEST_ERROR
- /* open the copied dataset NAME_DATASET_CHUNKED3 at destinaion */
+ /* open the copied dataset NAME_DATASET_CHUNKED3 at destination */
if ((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED3, H5P_DEFAULT)) < 0)
TEST_ERROR
@@ -8633,7 +8633,7 @@ test_copy_soft_link(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t dst_fa
char dst_filename[NAME_BUF_SIZE];
if (test_open) {
- TESTING("H5Ocopy(): openend object through soft link");
+ TESTING("H5Ocopy(): opened object through soft link");
}
else {
TESTING("H5Ocopy(): object through soft link");
@@ -9620,7 +9620,7 @@ test_copy_dataset_contig_named_vl(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl
char src_filename[NAME_BUF_SIZE];
char dst_filename[NAME_BUF_SIZE];
- TESTING("H5Ocopy(): contigous dataset with named VLEN datatype");
+ TESTING("H5Ocopy(): contiguous dataset with named VLEN datatype");
/* set initial data values */
for (i = 0; i < DIM_SIZE_1; i++) {
@@ -10380,10 +10380,10 @@ test_copy_dataset_contig_vl_vl(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, h
char dst_filename[NAME_BUF_SIZE];
if (test_open) {
- TESTING("H5Ocopy(): contigous and opened dataset with nested VLEN datatype");
+ TESTING("H5Ocopy(): contiguous and opened dataset with nested VLEN datatype");
}
else {
- TESTING("H5Ocopy(): contigous dataset with nested VLEN datatype");
+ TESTING("H5Ocopy(): contiguous dataset with nested VLEN datatype");
}
/* set initial data values */
@@ -11009,7 +11009,7 @@ typedef struct cmpd_vl_t {
* Failure: number of errors
*
* Programmer: Neil Fortner
- * Tuseday, September 29, 2009
+ * Tuesday, September 29, 2009
*
*-------------------------------------------------------------------------
*/
@@ -13658,7 +13658,7 @@ test_copy_cdt_hier_merge(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t d
TEST_ERROR
/*
- * Test 3: copy datsets in /g0 at SRC to DST group /uncopied
+ * Test 3: copy datasets in /g0 at SRC to DST group /uncopied
*/
if (H5Ocopy(fid_src, NAME_GROUP_TOP "/" SRC_DSET1, fid_dst, NAME_GROUP_UNCOPIED "/" SRC_DSET1, ocpypl_id,
H5P_DEFAULT) < 0)
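
The objcopy.c hunks all exercise H5Ocopy(); a minimal sketch of the call, with hypothetical object names and default property lists, is:

#include "hdf5.h"

/* Copy a dataset (and everything reachable from it) from the source file
 * into a group in the destination file. */
static herr_t copy_one_dataset(hid_t fid_src, hid_t fid_dst)
{
    return H5Ocopy(fid_src, "/g0/dset1",
                   fid_dst, "/uncopied/dset1",
                   H5P_DEFAULT,    /* object copy property list   */
                   H5P_DEFAULT);   /* link creation property list */
}
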
diff --git a/test/ohdr.c b/test/ohdr.c
index bf24298..1b41d0d 100644
--- a/test/ohdr.c
+++ b/test/ohdr.c
@@ -583,7 +583,7 @@ test_unknown(unsigned bogus_id, char *filename, hid_t fapl)
TESTING("object with unknown header message & 'shareable' flag set");
- /* Open the dataset with the unknown header message, adn "shareable" flag */
+ /* Open the dataset with the unknown header message, and "shareable" flag */
if ((did = H5Dopen2(loc_bogus, "Dataset5", H5P_DEFAULT)) < 0)
FAIL_STACK_ERROR
if (H5Dclose(did) < 0)
@@ -1088,13 +1088,13 @@ test_minimized_dset_ohdr_size_comparisons(hid_t fapl_id)
/* IDs for non-minimized file open */
hid_t file_f_id = -1; /* lower 'f' for standard file setting */
hid_t dset_f_x_id = -1; /* 'x' for default */
- hid_t dset_f_N_id = -1; /* 'N' for explcit non-minimized dset */
+ hid_t dset_f_N_id = -1; /* 'N' for explicit non-minimized dset */
hid_t dset_f_Y_id = -1; /* 'Y' for minimized dset */
/* IDs for minimized file open */
hid_t file_F_id = -1; /* upper 'F' for minimized file setting */
hid_t dset_F_x_id = -1; /* 'x' for default */
- hid_t dset_F_N_id = -1; /* 'N' for explcit non-minimized dset */
+ hid_t dset_F_N_id = -1; /* 'N' for explicit non-minimized dset */
hid_t dset_F_Y_id = -1; /* 'Y' for minimized dset */
char filename_a[512] = "";
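
The minimized-object-header comparison above toggles two public hints; a short sketch, with a hypothetical dataset name and no error checking, shows both the per-dataset (DCPL) and file-wide switches:

#include "hdf5.h"

/* Create a dataset with a minimized object header.  Both switches are
 * shown: the per-dataset hint ('Y' datasets above) and the file-wide
 * hint ('F' files above). */
static hid_t create_minimized_dset(hid_t file_id, hid_t space_id)
{
    hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
    hid_t did;

    H5Pset_dset_no_attrs_hint(dcpl, 1);     /* per-dataset hint */
    H5Fset_dset_no_attrs_hint(file_id, 1);  /* file-wide hint   */

    did = H5Dcreate2(file_id, "min_ohdr_dset", H5T_NATIVE_INT, space_id,
                     H5P_DEFAULT, dcpl, H5P_DEFAULT);
    H5Pclose(dcpl);
    return did;
}
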
diff --git a/test/page_buffer.c b/test/page_buffer.c
index 874aea7..f31580f 100644
--- a/test/page_buffer.c
+++ b/test/page_buffer.c
@@ -74,7 +74,7 @@ const char *FILENAME[] = {"filepaged", NULL};
* Any data mis-matches or failures reported by the HDF5
* library result in test failure.
*
- * Return: 0 if test is sucessful
+ * Return: 0 if test is successful
* 1 if test fails
*
* Programmer: unknown
@@ -214,7 +214,7 @@ error:
* Any data mis-matches or failures reported by the HDF5
* library result in test failure.
*
- * Return: 0 if test is sucessful
+ * Return: 0 if test is successful
* 1 if test fails
*
* Programmer: unknown
@@ -369,12 +369,12 @@ error:
* 1) verifying that API errors are caught.
*
* 2) verifying that the page buffer behaves more or less
- * as advertized.
+ * as advertised.
*
* Any data mis-matches or unexpected failures or successes
* reported by the HDF5 library result in test failure.
*
- * Return: 0 if test is sucessful
+ * Return: 0 if test is successful
* 1 if test fails
*
* Programmer: unknown
@@ -548,7 +548,7 @@ error:
* Any data mis-matches or failures reported by the HDF5
* library result in test failure.
*
- * Return: 0 if test is sucessful
+ * Return: 0 if test is successful
* 1 if test fails
*
* Programmer: unknown
@@ -826,7 +826,7 @@ error:
* Any data mis-matches or failures reported by the HDF5
* library result in test failure.
*
- * Return: 0 if test is sucessful
+ * Return: 0 if test is successful
* 1 if test fails
*
* Programmer: unknown
@@ -1080,7 +1080,7 @@ error:
* Any data mis-matches or failures reported by the HDF5
* library result in test failure.
*
- * Return: 0 if test is sucessful
+ * Return: 0 if test is successful
* 1 if test fails
*
* Programmer: unknown
@@ -1501,7 +1501,7 @@ test_min_threshold(hid_t orig_fapl, const char *env_h5_drvr)
if (f->shared->page_buf->raw_count != 2)
TEST_ERROR;
- /* adding more meta entires should replace meta entries since raw data
+ /* adding more meta entries should replace meta entries since raw data
* is at its minimum
*/
if (H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 600), sizeof(int) * 100, data) < 0)
@@ -1516,7 +1516,7 @@ test_min_threshold(hid_t orig_fapl, const char *env_h5_drvr)
if (f->shared->page_buf->raw_count != 2)
TEST_ERROR;
- /* bring existing raw entires up the LRU */
+ /* bring existing raw entries up the LRU */
if (H5F_block_read(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 750), sizeof(int) * 100, data) < 0)
FAIL_STACK_ERROR;
@@ -1532,7 +1532,7 @@ test_min_threshold(hid_t orig_fapl, const char *env_h5_drvr)
if (f->shared->page_buf->raw_count != 3)
TEST_ERROR;
- /* adding 2 meta entries should replace 2 entires at the bottom of the LRU */
+ /* adding 2 meta entries should replace 2 entries at the bottom of the LRU */
if (H5F_block_read(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 98), sizeof(int) * 100, data) < 0)
FAIL_STACK_ERROR;
@@ -1715,7 +1715,7 @@ error:
* Any data mis-matches or failures reported by the HDF5
* library result in test failure.
*
- * Return: 0 if test is sucessful
+ * Return: 0 if test is successful
* 1 if test fails
*
* Programmer: unknown
@@ -2018,7 +2018,7 @@ error:
* At present, page buffering should be disabled in parallel
* builds. Verify this.
*
- * Return: 0 if test is sucessful
+ * Return: 0 if test is successful
* 1 if test fails
*
* Programmer: John Mainzer
@@ -2133,7 +2133,7 @@ error:
*
* Purpose: Main function for the page buffer tests.
*
- * Return: 0 if test is sucessful
+ * Return: 0 if test is successful
* 1 if test fails
*
* Programmer: unknown
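
The page_buffer.c hunks above assume a file created with paged aggregation and a page buffer whose metadata/raw-data minimum percentages drive the LRU replacement being checked; a minimal sketch of that setup, with an illustrative file name and sizes, is:

#include "hdf5.h"

/* Create a file with paged aggregation and a page buffer.  The minimum
 * metadata/raw-data percentages (20/20 here) are what force entries of
 * one type to be evicted before the other drops below its minimum. */
static hid_t create_paged_file(void)
{
    hid_t fcpl = H5Pcreate(H5P_FILE_CREATE);
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    hid_t fid;

    H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)1);
    H5Pset_file_space_page_size(fcpl, (hsize_t)4096);

    /* a 16-page buffer; keep at least 20% metadata and 20% raw data */
    H5Pset_page_buffer_size(fapl, (size_t)(16 * 4096), 20, 20);

    fid = H5Fcreate("filepaged_example.h5", H5F_ACC_TRUNC, fcpl, fapl);
    H5Pclose(fcpl);
    H5Pclose(fapl);
    return fid;
}
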
diff --git a/test/reserved.c b/test/reserved.c
index 6e41a16..c5c76e8 100644
--- a/test/reserved.c
+++ b/test/reserved.c
@@ -479,7 +479,7 @@ error:
int
main(void)
{
- /* This test is currently not working properly; it should be revisted
+ /* This test is currently not working properly; it should be revisited
* when we have time.
*
* (Also, we should try to make this test work with all the VFDs)
@@ -509,8 +509,7 @@ main(void)
return num_errs;
}
else {
- HDputs(
- "All address space reservation tests skippped - Incompatible with current Virtual File Driver");
+ HDputs("All address space reservation tests skipped - Incompatible with current Virtual File Driver");
}
#endif /* BROKEN */
diff --git a/test/ros3.c b/test/ros3.c
index c852016..03d2ae9 100644
--- a/test/ros3.c
+++ b/test/ros3.c
@@ -56,7 +56,7 @@
* 4) Configurable expected-actual order in generated comparison strings.
* Some prefer `VERIFY(expected, actual)`, others
* `VERIFY(actual, expected)`. Provide preprocessor ifdef switch
- * to satifsy both parties, assuming one paradigm per test file.
+ * to satisfy both parties, assuming one paradigm per test file.
* (One could #undef and redefine the flag through the file as desired,
* but _why_.)
*
@@ -1444,7 +1444,7 @@ test_noops_and_autofails(void)
JSVERIFY(SUCCEED, H5FDlock(file, FALSE), NULL)
JSVERIFY(SUCCEED, H5FDunlock(file), NULL)
/* Lock/unlock with null file or similar error crashes tests.
- * HDassert in calling heirarchy, `H5FD[un]lock()` and `H5FD_[un]lock()`
+ * HDassert in calling hierarchy, `H5FD[un]lock()` and `H5FD_[un]lock()`
*/
/************
@@ -1760,7 +1760,7 @@ main(void)
bucket_url_env = HDgetenv("HDF5_ROS3_TEST_BUCKET_URL");
if (bucket_url_env == NULL || bucket_url_env[0] == '\0') {
- HDprintf("WARNING: S3 bucket url is not defined in enviornment "
+ HDprintf("WARNING: S3 bucket url is not defined in environment "
"variable 'HDF5_ROS3_TEST_BUCKET_URL'!\n");
}
else {
diff --git a/test/s3comms.c b/test/s3comms.c
index 1cc47b7..ac768d6 100644
--- a/test/s3comms.c
+++ b/test/s3comms.c
@@ -47,7 +47,7 @@
* 4) Configurable expected-actual order in generated comparison strings.
* Some prefer `VERIFY(expected, actual)`, others
* `VERIFY(actual, expected)`. Provide preprocessor ifdef switch
- * to satifsy both parties, assuming one paradigm per test file.
+ * to satisfy both parties, assuming one paradigm per test file.
* (One could #undef and redefine the flag through the file as desired,
* but _why_.)
* Provided as courtesy, per consideration for inclusion in the library
@@ -232,7 +232,7 @@
}
#ifdef JSVERIFY_EXP_ACT
-/* VERIFY rountines with paramter order (<expected>, <actual> [, <msg> ])
+/* VERIFY routines with parameter order (<expected>, <actual> [, <msg> ])
*/
/*----------------------------------------------------------------------------
@@ -423,7 +423,7 @@ error:
*
* Purpose:
*
- * Demonstrate the construction of a Canoncial Request (and Signed Headers)
+ * Demonstrate the construction of a Canonical Request (and Signed Headers)
*
* Elided / not yet implemented:
* Query strings
@@ -718,7 +718,7 @@ test_hrb_init_request(void)
FALSE,
},
{
- "null verb substitues to GET",
+ "null verb substitutes to GET",
NULL,
"/MYPATH/MYFILE.tiff",
"/MYPATH/MYFILE.tiff",
@@ -1131,10 +1131,10 @@ test_hrb_node_set(void)
*********/
for (mock_i = 0; test->given[mock_i] != NULL; mock_i += 2) {
- const char *name = test->given[mock_i];
- const char *valu = test->given[mock_i + 1];
+ const char *name = test->given[mock_i];
+ const char *value = test->given[mock_i + 1];
- FAIL_IF(SUCCEED != H5FD_s3comms_hrb_node_set(&list, name, valu))
+ FAIL_IF(SUCCEED != H5FD_s3comms_hrb_node_set(&list, name, value))
}
/********
* TEST *
@@ -1150,11 +1150,11 @@ test_hrb_node_set(void)
node = list;
mock_i = 0;
while (test->expected[mock_i] != NULL && node != NULL) {
- const char *name = test->expected[mock_i];
- const char *valu = test->expected[mock_i + 1];
+ const char *name = test->expected[mock_i];
+ const char *value = test->expected[mock_i + 1];
JSVERIFY_STR(name, node->name, NULL)
- JSVERIFY_STR(valu, node->value, NULL)
+ JSVERIFY_STR(value, node->value, NULL)
mock_i += 2;
node = node->next;
@@ -1362,7 +1362,7 @@ test_nlowercase(void)
HDfree(dest);
} /* end for each testcase */
- JSVERIFY(FAIL, H5FD_s3comms_nlowercase(NULL, cases[0].in, cases[0].len), "null distination should fail")
+ JSVERIFY(FAIL, H5FD_s3comms_nlowercase(NULL, cases[0].in, cases[0].len), "null destination should fail")
PASSED();
return 0;
@@ -1547,7 +1547,7 @@ test_parse_url(void)
NULL,
"a=b&d=b",
},
- "QUERY with implict PATH",
+ "QUERY with implicit PATH",
},
{
"http://[5]/path?a=b&d=b",
@@ -2002,7 +2002,7 @@ error:
* H5FD_s3comms_s3r_read << called by getsize(), multiple times working
* H5FD_s3comms_s3r_close
*
- * Shows most basic curl interation.
+ * Shows most basic curl iteration.
*
* Programmer: Jacob Smith
* 2017-10-06
@@ -2598,7 +2598,7 @@ main(void)
bucket_url_env = HDgetenv("HDF5_ROS3_TEST_BUCKET_URL");
if (bucket_url_env == NULL || bucket_url_env[0] == '\0') {
- HDprintf("WARNING: S3 bucket url is not defined in enviornment "
+ HDprintf("WARNING: S3 bucket url is not defined in environment "
"variable 'HDF5_ROS3_TEST_BUCKET_URL'!\n");
}
else {
@@ -2606,7 +2606,7 @@ main(void)
s3_test_bucket_defined = TRUE;
}
- /* tests ordered rougly by dependence */
+ /* tests ordered roughly by dependence */
nerrors += test_macro_format_credential() < 0 ? 1 : 0;
nerrors += test_trim() < 0 ? 1 : 0;
nerrors += test_nlowercase() < 0 ? 1 : 0;
diff --git a/test/set_extent.c b/test/set_extent.c
index dca41c1..daa9c18 100644
--- a/test/set_extent.c
+++ b/test/set_extent.c
@@ -115,12 +115,12 @@ main(void)
unsigned chunk_cache; /* Whether to enable chunk caching */
int nerrors = 0;
const char *env_h5_drvr; /* File Driver value from environment */
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
env_h5_drvr = HDgetenv("HDF5_DRIVER");
if (env_h5_drvr == NULL)
env_h5_drvr = "nomatch";
- /* Current VFD that does not support contigous address space */
+ /* Current VFD that does not support contiguous address space */
contig_addr_vfd = (hbool_t)(HDstrcmp(env_h5_drvr, "split") != 0 && HDstrcmp(env_h5_drvr, "multi") != 0);
/* Initialize random number seed */
@@ -649,7 +649,7 @@ test_rank1(hid_t fapl, hid_t dcpl, hbool_t do_fill_value, hbool_t disable_edge_f
HDprintf("\n");
#endif
- /* compare the read array with the shrinked array */
+ /* compare the read array with the shrunk array */
for (i = 0; i < (int)dims_r[0]; i++)
if (buf_s[i] != buf_o[i]) {
HDprintf("buf_s[%d] = %d\n", i, buf_s[i]);
@@ -1055,7 +1055,7 @@ test_rank2(hid_t fapl, hid_t dcpl, hbool_t do_fill_value, hbool_t disable_edge_f
}
#endif
- /* compare the read array with the shrinked array */
+ /* compare the read array with the shrunk array */
for (i = 0; i < (int)dims_r[0]; i++) {
for (j = 0; j < (int)dims_r[1]; j++) {
if (buf_s[i][j] != buf_o[i][j]) {
@@ -1536,7 +1536,7 @@ test_rank3(hid_t fapl, hid_t dcpl, hbool_t do_fill_value, hbool_t disable_edge_f
HDprintf("\n");
#endif
- /* compare the read array with the shrinked array */
+ /* compare the read array with the shrunk array */
for (i = 0; i < (int)dims_r[0]; i++) {
for (j = 0; j < (int)dims_r[1]; j++) {
for (k = 0; k < (int)dims_r[2]; k++) {
@@ -1734,7 +1734,7 @@ test_external(hid_t fapl)
hsize_t dims_r[RANK2]; /* read dimensions */
hsize_t maxdims[RANK2] = {DIME0, DIM1}; /* only the first dimension can be extendible */
int buf_o[DIM0][DIM1]; /* original buffer, for writing */
- int buf_s[DIMS0][DIMS1]; /* shrinked buffer, for reading */
+ int buf_s[DIMS0][DIMS1]; /* shrunk buffer, for reading */
int buf_e[DIME0][DIM1]; /* extended buffer, for writing, dimension 1 is the original */
int buf_ro[DIM0][DIM1]; /* original buffer for reading */
int i, j;
@@ -1946,7 +1946,7 @@ test_external(hid_t fapl)
}
#endif
- /* compare the read array with the shrinked array */
+ /* compare the read array with the shrunk array */
for (i = 0; i < (int)dims_r[0]; i++) {
for (j = 0; j < (int)dims_r[1]; j++) {
if (buf_s[i][j] != buf_o[i][j]) {
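
The set_extent.c hunks compare data read back after a shrink; a minimal sketch of that shrink-then-read step, assuming an open chunked dataset whose maximum dimensions permit it and using illustrative sizes rather than the test's DIMS0/DIMS1 macros:

#include "hdf5.h"

#define EX_DIMS0 2 /* illustrative shrunk sizes */
#define EX_DIMS1 2

/* Shrink an extendible dataset and read it back so the contents can be
 * compared against the original write buffer, as the hunks above do. */
static herr_t shrink_and_read(hid_t did, int buf_s[EX_DIMS0][EX_DIMS1])
{
    hsize_t dims_s[2] = {EX_DIMS0, EX_DIMS1};

    if (H5Dset_extent(did, dims_s) < 0)
        return -1;

    return H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_s);
}
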
diff --git a/test/stab.c b/test/stab.c
index 50c3a3f..9bdde1a 100644
--- a/test/stab.c
+++ b/test/stab.c
@@ -125,7 +125,7 @@ test_misc(hid_t fcpl, hid_t fapl, hbool_t new_format)
if (H5Gclose(g3) < 0)
TEST_ERROR
- /* Open all groups with absolute names to check for exsistence */
+ /* Open all groups with absolute names to check for existence */
if ((g1 = H5Gopen2(fid, "/test_1a", H5P_DEFAULT)) < 0)
TEST_ERROR
if ((g2 = H5Gopen2(fid, "/test_1a/sub_1", H5P_DEFAULT)) < 0)
@@ -1416,7 +1416,7 @@ main(void)
hid_t fcpl, fcpl2; /* File creation property list ID */
unsigned new_format; /* Whether to use the new format or not */
const char *env_h5_drvr; /* File Driver value from environment */
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
int nerrors = 0;
/* Get the VFD to use */
@@ -1424,7 +1424,7 @@ main(void)
if (env_h5_drvr == NULL)
env_h5_drvr = "nomatch";
- /* VFD that does not support contigous address space */
+ /* VFD that does not support contiguous address space */
contig_addr_vfd = (hbool_t)(HDstrcmp(env_h5_drvr, "split") != 0 && HDstrcmp(env_h5_drvr, "multi") != 0);
/* Reset library */
diff --git a/test/swmr.c b/test/swmr.c
index 7dd5e75..f290ffc 100644
--- a/test/swmr.c
+++ b/test/swmr.c
@@ -4037,7 +4037,7 @@ test_append_flush_dataset_chunked(hid_t in_fapl)
{
hid_t fid = -1; /* file ID */
hid_t fapl = -1; /* A copy of file access property */
- hid_t did1 = -1, did2 = -1; /* The datset ID */
+ hid_t did1 = -1, did2 = -1; /* The dataset ID */
hid_t sid = -1; /* The dataspace ID */
hid_t dcpl = -1; /* A copy of dataset creation property */
hid_t dapl = -1; /* A copy of dataset access property */
@@ -4260,7 +4260,7 @@ test_append_flush_dataset_fixed(hid_t in_fapl)
{
hid_t fid = -1; /* file ID */
hid_t fapl = -1; /* A copy of file access property */
- hid_t did1 = -1, did2 = -1; /* The datset ID */
+ hid_t did1 = -1, did2 = -1; /* The dataset ID */
hid_t sid = -1; /* The dataspace ID */
hid_t dapl = -1; /* A copy of dataset access property */
hid_t ddapl = -1; /* The dataset access property of the opened dataset */
@@ -4478,7 +4478,7 @@ test_append_flush_dataset_multiple(hid_t in_fapl)
{
hid_t fid = -1; /* file ID */
hid_t fapl = -1; /* A copy of file access property */
- hid_t did1 = -1, did2 = -1; /* The datset ID */
+ hid_t did1 = -1, did2 = -1; /* The dataset ID */
hid_t sid = -1; /* The dataspace ID */
hid_t dcpl = -1; /* A copy of dataset creation property */
hid_t dapl1 = -1; /* A copy of dataset access property */
@@ -7785,7 +7785,7 @@ main(void)
/*
* Modify the following routines to test for files:
* H5Fcreate(write, latest format) or H5Fcreate(SWMR write, non-latest-format)
- * --both result in v3 superblock and latest version suppport
+ * --both result in v3 superblock and latest version support
*/
nerrors += test_start_swmr_write(fapl, TRUE);
nerrors += test_start_swmr_write(fapl, FALSE);
@@ -7820,7 +7820,7 @@ main(void)
*
* Modify the following 2 routines to test for files:
* H5Fcreate(write, latest format) or H5Fcreate(SWMR write, non-latest-format)
- * --both result in v3 superblock and latest version suppport
+ * --both result in v3 superblock and latest version support
*/
nerrors += test_file_lock_swmr_same(fapl);
nerrors += test_file_lock_swmr_concur(fapl);
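
The swmr.c comments above refer to the two file-creation paths that both yield a v3 superblock; a sketch of the two calls, with illustrative file names and assuming `fapl` is a valid file access property list:

#include "hdf5.h"

/* (a) latest library format with plain write access, and
 * (b) SWMR-write access with the default (non-latest) format. */
static void create_swmr_capable_files(hid_t fapl)
{
    hid_t fapl_latest = H5Pcopy(fapl);
    hid_t fid;

    H5Pset_libver_bounds(fapl_latest, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
    fid = H5Fcreate("swmr_a.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl_latest);
    H5Fclose(fid);

    fid = H5Fcreate("swmr_b.h5", H5F_ACC_TRUNC | H5F_ACC_SWMR_WRITE,
                    H5P_DEFAULT, fapl);
    H5Fclose(fid);

    H5Pclose(fapl_latest);
}
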
diff --git a/test/swmr_common.c b/test/swmr_common.c
index 2201427..2665969 100644
--- a/test/swmr_common.c
+++ b/test/swmr_common.c
@@ -187,7 +187,7 @@ generate_name(char *name_buf, unsigned level, unsigned count)
/*-------------------------------------------------------------------------
* Function: generate_symbols
*
- * Purpose: Initializes the global dataset infomration arrays.
+ * Purpose: Initializes the global dataset information arrays.
*
* Parameters: N/A
*
diff --git a/test/swmr_generator.c b/test/swmr_generator.c
index a455f3f..93cfa0f 100644
--- a/test/swmr_generator.c
+++ b/test/swmr_generator.c
@@ -221,7 +221,7 @@ gen_skeleton(const char *filename, hbool_t verbose, hbool_t swmr_write, int comp
if (verbose)
HDfprintf(stderr, "Closing objects\n");
- /* Close everythign */
+ /* Close everything */
if (H5Pclose(dcpl) < 0)
return -1;
if (H5Sclose(sid) < 0)
diff --git a/test/swmr_reader.c b/test/swmr_reader.c
index 0566613..3369562 100644
--- a/test/swmr_reader.c
+++ b/test/swmr_reader.c
@@ -275,7 +275,7 @@ read_records(const char *filename, hbool_t verbose, FILE *verbose_file, unsigned
if ((fapl = h5_fileaccess()) < 0)
return -1;
- /* Log I/O when verbose output it enbabled */
+ /* Log I/O when verbose output is enabled */
if (use_log_vfd) {
char verbose_name[1024];
diff --git a/test/swmr_sparse_writer.c b/test/swmr_sparse_writer.c
index 96eff6c..0c8e391 100644
--- a/test/swmr_sparse_writer.c
+++ b/test/swmr_sparse_writer.c
@@ -149,7 +149,7 @@ add_records(hid_t fid, unsigned verbose, unsigned long nrecords, unsigned long f
symbol_t record; /* The record to add to the dataset */
unsigned long rec_to_flush; /* # of records left to write before flush */
#ifdef OUT
- volatile int dummy; /* Dummy varialbe for busy sleep */
+ volatile int dummy; /* Dummy variable for busy sleep */
#endif /* OUT */
hsize_t dim[2] = {1, 0}; /* Dataspace dimensions */
unsigned long u, v; /* Local index variables */
diff --git a/test/swmr_start_write.c b/test/swmr_start_write.c
index 38d6aa4..54ea363 100644
--- a/test/swmr_start_write.c
+++ b/test/swmr_start_write.c
@@ -125,7 +125,7 @@ create_file(const char *filename, hbool_t verbose, FILE *verbose_file, unsigned
*
* Parameters:
* fid: file ID for the SWMR test file
- * comp_level: the compresssion level
+ * comp_level: the compression level
* index_type: The chunk index type (b1 | b2 | ea | fa)
* verbose: whether verbose console output is desired.
* verbose_file: file pointer for verbose output
diff --git a/test/tattr.c b/test/tattr.c
index ab33881..8ca959b 100644
--- a/test/tattr.c
+++ b/test/tattr.c
@@ -5847,7 +5847,7 @@ test_attr_info_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl)
is_dense = H5O__is_attr_dense_test(my_dataset);
VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
- /* Check for query on non-existant attribute */
+ /* Check for query on non-existent attribute */
ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, &ainfo,
H5P_DEFAULT);
VERIFY(ret, FAIL, "H5Aget_info_by_idx");
@@ -6109,7 +6109,7 @@ test_attr_delete_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl)
is_dense = H5O__is_attr_dense_test(my_dataset);
VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
- /* Check for deleting non-existant attribute */
+ /* Check for deleting non-existent attribute */
ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT);
VERIFY(ret, FAIL, "H5Adelete_by_idx");
@@ -7761,7 +7761,7 @@ test_attr_open_by_name(hbool_t new_format, hid_t fcpl, hid_t fapl)
is_dense = H5O__is_attr_dense_test(my_dataset);
VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
- /* Check for opening a non-existant attribute on an object with no attributes */
+ /* Check for opening a non-existent attribute on an object with no attributes */
ret_id = H5Aopen(my_dataset, "foo", H5P_DEFAULT);
VERIFY(ret_id, FAIL, "H5Aopen");
@@ -7800,7 +7800,7 @@ test_attr_open_by_name(hbool_t new_format, hid_t fcpl, hid_t fapl)
is_dense = H5O__is_attr_dense_test(my_dataset);
VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
- /* Check for opening a non-existant attribute on an object with compact attribute storage */
+ /* Check for opening a non-existent attribute on an object with compact attribute storage */
ret_id = H5Aopen(my_dataset, "foo", H5P_DEFAULT);
VERIFY(ret_id, FAIL, "H5Aopen");
@@ -7881,7 +7881,7 @@ test_attr_open_by_name(hbool_t new_format, hid_t fcpl, hid_t fapl)
VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test");
} /* end if */
- /* Check for opening a non-existant attribute on an object with dense attribute storage */
+ /* Check for opening a non-existent attribute on an object with dense attribute storage */
ret_id = H5Aopen(my_dataset, "foo", H5P_DEFAULT);
VERIFY(ret_id, FAIL, "H5Aopen");
@@ -10743,7 +10743,7 @@ test_attr_bug8(hid_t fcpl, hid_t fapl)
** chunk. Do this twice: once with only attributes in the
** object header chunk and once with a (small) soft link in
** the chunk as well. In both cases, the shrunk chunk will
-** initally be too small and a new NULL message must be
+** initially be too small and a new NULL message must be
** created.
**
****************************************************************/
diff --git a/test/tcoords.c b/test/tcoords.c
index 7273677..37c1c1a 100644
--- a/test/tcoords.c
+++ b/test/tcoords.c
@@ -17,7 +17,7 @@
*
* Test the element coordinates for dataspace selection. For
* chunked dataset, when the hyperslab selection of some
- * dimensions is full, the library optimize it by "flattenning"
+ * dimensions is full, the library optimizes it by "flattening"
* the fully selected dimensions. This program tests if the
* coordinates of selected elements are correctly calculated.
*
diff --git a/test/testmeta.c b/test/testmeta.c
index c59c6cb..6c67f06 100644
--- a/test/testmeta.c
+++ b/test/testmeta.c
@@ -15,7 +15,7 @@
* This program illustrates assertion errors when linked
* to HDF5 1.4.1-post2 or 1.4.2-pre3 debug library.
*
- * If the assertion errors are ignored, the program eventially causes
+ * If the assertion errors are ignored, the program eventually causes
* an error in H5Gcreate2 when writing object 83381.
*
* When writing in single file mode, the assertion errors still occur
diff --git a/test/tfile.c b/test/tfile.c
index ee8ca02..329d9a5 100644
--- a/test/tfile.c
+++ b/test/tfile.c
@@ -224,7 +224,7 @@ test_file_create(void)
/* First ensure the file does not exist */
HDremove(FILE1);
- /* Try opening a non-existant file */
+ /* Try opening a non-existent file */
fid1 = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT);
VERIFY(fid1, FAIL, "H5Fopen");
@@ -1141,7 +1141,7 @@ test_get_obj_ids(void)
CHECK(oid_count, FAIL, "H5Fget_obj_count");
VERIFY(oid_count, NGROUPS + 2, "H5Fget_obj_count");
- /* Get the IDs of the left opend objects */
+ /* Get the IDs of the left opened objects */
ret_count = H5Fget_obj_ids(fid, H5F_OBJ_ALL, (size_t)oid_list_size, oid_list);
CHECK(ret_count, FAIL, "H5Fget_obj_ids");
VERIFY(ret_count, oid_list_size, "H5Fget_obj_count");
@@ -1610,7 +1610,7 @@ test_file_perm2(void)
**
** test_file_ishdf5(): low-level file test routine.
** This test checks whether the H5Fis_hdf5() routine is working
-** correctly in variuous situations.
+** correctly in various situations.
**
*****************************************************************/
static void
@@ -1651,7 +1651,7 @@ test_file_ishdf5(void)
file = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
CHECK(file, FAIL, "H5Fcreate");
- /* Release file-creation property list */
+ /* Release file creation property list */
ret = H5Pclose(fcpl);
CHECK(ret, FAIL, "H5Pclose");
@@ -1689,7 +1689,7 @@ test_file_ishdf5(void)
**
** test_file_open_dot(): low-level file test routine.
** This test checks whether opening objects with "." for a name
-** works correctly in variuous situations.
+** works correctly in various situations.
**
*****************************************************************/
static void
@@ -2057,7 +2057,7 @@ test_file_double_root_open(void)
grp2_id = H5Gopen2(file2_id, "/", H5P_DEFAULT);
CHECK(grp2_id, FAIL, "H5Gopen2");
- /* Note "assymetric" close order */
+ /* Note "asymmetric" close order */
ret = H5Gclose(grp1_id);
CHECK(ret, FAIL, "H5Gclose");
ret = H5Gclose(grp2_id);
@@ -2096,7 +2096,7 @@ test_file_double_group_open(void)
grp2_id = H5Gopen2(file2_id, GRP_NAME, H5P_DEFAULT);
CHECK(grp2_id, FAIL, "H5Gopen2");
- /* Note "assymetric" close order */
+ /* Note "asymmetric" close order */
ret = H5Gclose(grp1_id);
CHECK(ret, FAIL, "H5Gclose");
ret = H5Gclose(grp2_id);
@@ -2145,7 +2145,7 @@ test_file_double_dataset_open(void)
ret = H5Sclose(space_id);
CHECK(ret, FAIL, "H5Sclose");
- /* Note "assymetric" close order */
+ /* Note "asymmetric" close order */
ret = H5Dclose(dset1_id);
CHECK(ret, FAIL, "H5Dclose");
ret = H5Dclose(dset2_id);
@@ -2546,7 +2546,7 @@ test_file_double_datatype_open(void)
type2_id = H5Topen2(file2_id, TYPE_NAME, H5P_DEFAULT);
CHECK(type2_id, FAIL, "H5Topen2");
- /* Note "assymetric" close order */
+ /* Note "asymmetric" close order */
ret = H5Tclose(type1_id);
CHECK(ret, FAIL, "H5Tclose");
ret = H5Tclose(type2_id);
@@ -2590,7 +2590,7 @@ test_userblock_file_size(void)
ret = H5Pset_userblock(fcpl2_id, USERBLOCK_SIZE);
CHECK(ret, FAIL, "H5Pset_userblock");
- /* Create files. Onyl file2 with have a userblock. */
+ /* Create files. Only file2 will have a userblock. */
file1_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
CHECK(file1_id, FAIL, "H5Fcreate");
file2_id = H5Fcreate(FILE2, H5F_ACC_TRUNC, fcpl2_id, H5P_DEFAULT);
@@ -3502,7 +3502,7 @@ test_userblock_alignment_paged(void)
* Strategy is H5F_FILE_SPACE_NONE; fsp_size = 1024
* H5Pset_alignment() is 16
* Outcome:
- * Should succed:
+ * Should succeed:
* userblock (512) is integral multiple of alignment (16)
*/
/* Create file creation property list with user block */
@@ -3541,7 +3541,7 @@ test_userblock_alignment_paged(void)
* H5Pset_alignment() is 3
* Reopen the file; H5Pset_alignment() is 1024
* Outcome:
- * Should succed:
+ * Should succeed:
* Userblock (512) is the same as alignment (512);
* The H5Pset_alignment() calls have no effect
*/
@@ -3610,7 +3610,7 @@ test_filespace_info(const char *env_h5_drvr)
hsize_t fs_threshold; /* Free-space section threshold--iteration variable */
hsize_t fsp_size; /* File space page size */
char filename[FILENAME_LEN]; /* Filename to use */
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
herr_t ret; /* Return value */
/* Output message about test being performed */
@@ -4852,7 +4852,7 @@ test_filespace_1_10_0_compatible(void)
** Verify that the trunk can open, read and modify these files--
** 1) They are initially created (via gen_filespace.c) in the trunk
** with combinations of file space strategies, default/non-default
-** threshold, and file spacing paging enabled/disbled.
+** threshold, and file spacing paging enabled/disabled.
** The library creates the file space info message with
** "mark if unknown" in these files.
** 2) They are copied to the 1.8 branch, and are opened/read/modified
@@ -6000,7 +6000,7 @@ test_libver_bounds_dataset(hid_t fapl)
VERIFY(dset->shared->dcpl_cache.fill.version, H5O_fill_ver_bounds[low], "H5O_fill_ver_bounds");
}
- /* Verify filter pipleline message version */
+ /* Verify filter pipeline message version */
VERIFY(dset->shared->dcpl_cache.pline.version, H5O_pline_ver_bounds[low], "H5O_pline_ver_bounds");
/* Close the dataset */
@@ -6053,7 +6053,7 @@ test_libver_bounds_dataset(hid_t fapl)
ret = H5Sclose(sid);
CHECK(ret, FAIL, "H5Sclose");
- /* Close the datset creation property list */
+ /* Close the dataset creation property list */
ret = H5Pclose(dcpl);
CHECK(ret, FAIL, "H5Pclose");
@@ -6073,7 +6073,7 @@ test_libver_bounds_dataset(hid_t fapl)
/* Loop through all the combinations of low/high bounds in new_fapl */
/* Open the file with the fapl and create the chunked dataset */
- /* Verify the dataset's layout, fill value and filter pipleline message versions */
+ /* Verify the dataset's layout, fill value and filter pipeline message versions */
for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) {
for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) {
H5E_BEGIN_TRY
@@ -6178,7 +6178,7 @@ test_libver_bounds_dataset(hid_t fapl)
** new fapl:
** --Open the same file in (a) with the fapl
** --Create a chunked dataset, a compact dataset and
-** a contigous dataset
+** a contiguous dataset
** --Verify the dataspace message version for these
** three datasets
** --Delete the three datasets and the dataspaces
@@ -6527,7 +6527,7 @@ static void
test_libver_bounds_datatype_check(hid_t fapl, hid_t tid)
{
hid_t fid = H5I_INVALID_HID; /* File ID */
- hid_t new_fapl = H5I_INVALID_HID; /* File acess property list */
+ hid_t new_fapl = H5I_INVALID_HID; /* File access property list */
hid_t dcpl = H5I_INVALID_HID; /* Dataset creation property list */
hid_t dtid = H5I_INVALID_HID; /* Datatype ID for the dataset */
hid_t str_tid = H5I_INVALID_HID; /* String datatype ID */
@@ -6787,7 +6787,7 @@ test_libver_bounds_attributes(hid_t fapl)
fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
CHECK(fid, H5I_INVALID_HID, "H5Fcreate");
- /* Integer datatpye */
+ /* Integer datatype */
tid = H5Tcopy(H5T_NATIVE_INT);
CHECK(tid, H5I_INVALID_HID, "H5Tcopy");
@@ -6896,7 +6896,7 @@ test_libver_bounds_attributes(hid_t fapl)
fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, fcpl, fapl);
CHECK(fid, H5I_INVALID_HID, "H5Fcreate");
- /* Create an integer datatye */
+ /* Create an integer datatype */
tid = H5Tcopy(H5T_NATIVE_INT);
CHECK(tid, H5I_INVALID_HID, "H5Tcopy");
@@ -7691,7 +7691,7 @@ test_file(void)
test_libver_macros(); /* Test the macros for library version comparison */
test_libver_macros2(); /* Test the macros for library version comparison */
test_incr_filesize(); /* Test H5Fincrement_filesize() and H5Fget_eoa() */
- test_min_dset_ohdr(); /* Test datset object header minimization */
+ test_min_dset_ohdr(); /* Test dataset object header minimization */
#ifndef H5_NO_DEPRECATED_SYMBOLS
test_deprec(); /* Test deprecated routines */
#endif /* H5_NO_DEPRECATED_SYMBOLS */
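
The userblock/alignment case spelled out in the tfile.c hunks (userblock of 512, alignment of 16, expected to succeed because 512 is an integral multiple of 16) can be sketched as follows; the file name is illustrative and error checking is omitted:

#include "hdf5.h"

static hid_t create_with_userblock_and_alignment(void)
{
    hid_t fcpl = H5Pcreate(H5P_FILE_CREATE);
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    hid_t fid;

    H5Pset_userblock(fcpl, (hsize_t)512);
    H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)16);

    fid = H5Fcreate("userblock_align.h5", H5F_ACC_TRUNC, fcpl, fapl);
    H5Pclose(fcpl);
    H5Pclose(fapl);
    return fid; /* expected to be non-negative */
}
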
diff --git a/test/tgenprop.c b/test/tgenprop.c
index 719b928..0a5e569 100644
--- a/test/tgenprop.c
+++ b/test/tgenprop.c
@@ -107,7 +107,7 @@ test_genprop_basic_class(void)
ret = H5Pequal(cid2, H5P_ROOT);
VERIFY(ret, 1, "H5Pequal");
- /* Make certain false postives aren't being returned */
+ /* Make certain false positives aren't being returned */
ret = H5Pequal(cid2, H5P_FILE_CREATE);
VERIFY(ret, 0, "H5Pequal");
@@ -185,7 +185,7 @@ test_genprop_basic_class_prop(void)
CHECK_I(ret, "H5Pget_nprops");
VERIFY(nprops, 0, "H5Pget_nprops");
- /* Check the existance of the first property (should fail) */
+ /* Check the existence of the first property (should fail) */
ret = H5Pexist(cid1, PROP1_NAME);
VERIFY(ret, 0, "H5Pexist");
@@ -199,7 +199,7 @@ test_genprop_basic_class_prop(void)
H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
VERIFY(ret, FAIL, "H5Pregister2");
- /* Check the existance of the first property */
+ /* Check the existence of the first property */
ret = H5Pexist(cid1, PROP1_NAME);
VERIFY(ret, 1, "H5Pexist");
@@ -223,7 +223,7 @@ test_genprop_basic_class_prop(void)
H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
VERIFY(ret, FAIL, "H5Pregister2");
- /* Check the existance of the second property */
+ /* Check the existence of the second property */
ret = H5Pexist(cid1, PROP2_NAME);
VERIFY(ret, 1, "H5Pexist");
@@ -242,7 +242,7 @@ test_genprop_basic_class_prop(void)
H5Pregister2(cid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
CHECK_I(ret, "H5Pregister2");
- /* Check the existance of the third property */
+ /* Check the existence of the third property */
ret = H5Pexist(cid1, PROP3_NAME);
VERIFY(ret, 1, "H5Pexist");
@@ -1942,7 +1942,7 @@ test_genprop_deprec_class(void)
CHECK_I(ret, "H5Pget_nprops");
VERIFY(nprops, 0, "H5Pget_nprops");
- /* Check the existance of the first property (should fail) */
+ /* Check the existence of the first property (should fail) */
ret = H5Pexist(cid1, PROP1_NAME);
VERIFY(ret, 0, "H5Pexist");
@@ -1954,7 +1954,7 @@ test_genprop_deprec_class(void)
ret = H5Pregister1(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL);
VERIFY(ret, FAIL, "H5Pregister1");
- /* Check the existance of the first property */
+ /* Check the existence of the first property */
ret = H5Pexist(cid1, PROP1_NAME);
VERIFY(ret, 1, "H5Pexist");
@@ -1976,7 +1976,7 @@ test_genprop_deprec_class(void)
ret = H5Pregister1(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL);
VERIFY(ret, FAIL, "H5Pregister1");
- /* Check the existance of the second property */
+ /* Check the existence of the second property */
ret = H5Pexist(cid1, PROP2_NAME);
VERIFY(ret, 1, "H5Pexist");
@@ -1994,7 +1994,7 @@ test_genprop_deprec_class(void)
ret = H5Pregister1(cid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL);
CHECK_I(ret, "H5Pregister1");
- /* Check the existance of the third property */
+ /* Check the existence of the third property */
ret = H5Pexist(cid1, PROP3_NAME);
VERIFY(ret, 1, "H5Pexist");
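
The tgenprop.c hunks repeat an existence check around property registration; a self-contained sketch of that register-then-H5Pexist pattern, using an illustrative class/property name instead of the test's PROP1_* macros:

#include "hdf5.h"

/* Register a property in a new class and confirm that H5Pexist()
 * reports it only after registration. */
static int genprop_exist_example(void)
{
    int    def_value = 10;
    hid_t  cls = H5Pcreate_class(H5P_ROOT, "example class",
                                 NULL, NULL, NULL, NULL, NULL, NULL);
    htri_t before = H5Pexist(cls, "prop1");   /* expected: 0 */
    htri_t after;

    H5Pregister2(cls, "prop1", sizeof(int), &def_value,
                 NULL, NULL, NULL, NULL, NULL, NULL, NULL);
    after = H5Pexist(cls, "prop1");           /* expected: 1 */

    H5Pclose_class(cls);
    return before == 0 && after == 1;
}
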
diff --git a/test/th5o.c b/test/th5o.c
index f1790d0..31b6782 100644
--- a/test/th5o.c
+++ b/test/th5o.c
@@ -466,7 +466,7 @@ test_h5o_refcount(void)
CHECK(ret, FAIL, "H5Oget_info_by_name");
VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name");
- /* Increment the reference counts and then close the file to make sure the increment is permanant */
+ /* Increment the reference counts and then close the file to make sure the increment is permanent */
ret = H5Oincr_refcount(grp);
CHECK(ret, FAIL, "H5Oincr_refcount");
ret = H5Oincr_refcount(dtype);
diff --git a/test/th5s.c b/test/th5s.c
index 8144504..8d414c9 100644
--- a/test/th5s.c
+++ b/test/th5s.c
@@ -1104,7 +1104,7 @@ test_h5s_zero_dim(void)
dset1 = H5Dopen2(fid1, BASICDATASET1, H5P_DEFAULT);
CHECK(dset1, FAIL, "H5Dopen2");
- /* Get the space of the dataset and querry it */
+ /* Get the space of the dataset and query it */
sid1 = H5Dget_space(dset1);
CHECK(sid1, FAIL, "H5Dget_space");
diff --git a/test/tid.c b/test/tid.c
index d8b2d45..71c5af3 100644
--- a/test/tid.c
+++ b/test/tid.c
@@ -41,7 +41,7 @@ basic_id_test(void)
int num_ref;
hsize_t num_members;
- /* Try to register an ID with ficticious types */
+ /* Try to register an ID with fictitious types */
H5E_BEGIN_TRY
arrayID = H5Iregister((H5I_type_t)420, testObj);
H5E_END_TRY
@@ -58,7 +58,7 @@ basic_id_test(void)
if (arrayID != H5I_INVALID_HID)
goto out;
- /* Try to access IDs with ficticious types */
+ /* Try to access IDs with fictitious types */
H5E_BEGIN_TRY
testPtr = H5Iobject_verify((hid_t)100, (H5I_type_t)0);
H5E_END_TRY
diff --git a/test/timer.c b/test/timer.c
index ea9787a..2de7980 100644
--- a/test/timer.c
+++ b/test/timer.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/tmisc.c b/test/tmisc.c
index 6d813dd..da39318 100644
--- a/test/tmisc.c
+++ b/test/tmisc.c
@@ -1280,7 +1280,7 @@ test_misc8(void)
for (v = 0; v < MISC8_DIM1; v++)
*tdata++ = (int)(((u * MISC8_DIM1) + v) % 13);
- /* Create a file acccess property list */
+ /* Create a file access property list */
fapl = H5Pcreate(H5P_FILE_ACCESS);
CHECK(fapl, FAIL, "H5Pcreate");
@@ -3802,7 +3802,7 @@ test_misc21(void)
/****************************************************************
**
-** test_misc22(): Test SZIP bits-per-pixel paramter.
+** test_misc22(): Test SZIP bits-per-pixel parameter.
** This should be set according to the datatype.
** Tests for precision and offset combo's.
**
diff --git a/test/trefer.c b/test/trefer.c
index 5b7a3cf..2a13bbc 100644
--- a/test/trefer.c
+++ b/test/trefer.c
@@ -1588,14 +1588,14 @@ test_reference_sel_none(void)
hid_t sid = H5I_INVALID_HID; /* Dataspace ID */
hid_t ref_sid = H5I_INVALID_HID; /* Dataspace ID for referenced */
hid_t did = H5I_INVALID_HID; /* Dataset ID */
- hsize_t dims[1] = {SPACE1_DIM1}; /* Dimension siez ofr dataset */
+ hsize_t dims[1] = {SPACE1_DIM1}; /* Dimension size of dataset */
int buf[SPACE1_DIM1]; /* Buffer for data */
hdset_reg_ref_t ref[1]; /* Buffer for reference */
hssize_t npoints; /* Number of points in selection */
herr_t status; /* Return status */
int i; /* Local index variable */
- /* Iniitialize buffer */
+ /* Initialize buffer */
for (i = 0; i < SPACE1_DIM1; i++)
buf[i] = i;
diff --git a/test/tselect.c b/test/tselect.c
index 6a6a6cf..2fd7082 100644
--- a/test/tselect.c
+++ b/test/tselect.c
@@ -1879,7 +1879,7 @@ verify_select_hyper_contig_dr__run_test(const uint16_t *cube_buf, size_t H5_ATTR
uint16_t expected_value; /* Expected value in dataset */
unsigned i, j, k, l, m; /* Local index variables */
size_t s; /* Local index variable */
- hbool_t mis_match; /* Flag to indicate mis-match in expected value */
+ hbool_t mis_match; /* Flag to indicate mismatch in expected value */
HDassert(cube_buf);
HDassert(cube_size > 0);
@@ -2357,7 +2357,7 @@ test_select_hyper_contig_dr__run_test(int test_num, const uint16_t *cube_buf, co
/* Now write the contents of the in memory small cube to slices of
* the on disk cube. After each write, read the on disk cube
- * into memeory, and verify that it contains the expected
+ * into memory, and verify that it contains the expected
* data. Verify that H5Sselect_shape_same() returns true on
* the memory and file selections.
*/
@@ -2742,7 +2742,7 @@ test_select_hyper_checker_board_dr__select_checker_board(hid_t tgt_n_cube_sid, u
i++;
} while ((i <= 1) && (0 >= sel_offset));
- /* Wierdness alert:
+ /* Weirdness alert:
*
* Some how, it seems that selections can extend beyond the
* boundaries of the target dataspace -- hence the following
@@ -3401,7 +3401,7 @@ test_select_hyper_checker_board_dr__run_test(int test_num, const uint16_t *cube_
/* Now write checker board selections of the entries in memory
* small cube to slices of the on disk cube. After each write,
- * read the on disk large cube * into memeory, and verify that
+ * read the on disk large cube * into memory, and verify that
* it contains the expected * data. Verify that
* H5Sselect_shape_same() returns true on the memory and file
* selections.
@@ -5256,7 +5256,7 @@ test_select_hyper_union_stagger(void)
error = H5Fclose(file_id);
CHECK(error, FAIL, "H5Fclose");
- /* Initialize intput buffer */
+ /* Initialize input buffer */
HDmemset(data_out, 0, 7 * 7 * sizeof(int));
/* Open file */
@@ -6785,7 +6785,7 @@ test_select_point_chunk(void)
/* Verify data (later) */
- /* Close everything (inclusing selections) */
+ /* Close everything (including selections) */
ret = H5Sclose(pnt1_space);
CHECK(ret, FAIL, "H5Sclose");
ret = H5Sclose(pnt2_space);
@@ -10955,7 +10955,7 @@ test_shape_same_dr__full_space_vs_slice(int test_num, int small_rank, int large_
** m-cube (m >= n).
**
** 2) The dimensions selected in the slice through the m-cube
-** are the dimesnions with the most quickly changing
+** are the dimensions with the most quickly changing
** indices.
**
****************************************************************/
@@ -11251,7 +11251,7 @@ test_shape_same_dr__checkerboard(int test_num, int small_rank, int large_rank, i
CHECK(ret, FAIL, "H5Sselect_hyperslab");
} /* end if */
- /* Wierdness alert:
+ /* Weirdness alert:
*
* Some how, it seems that selections can extend beyond the
* boundaries of the target dataspace -- hence the following
@@ -11346,7 +11346,7 @@ test_shape_same_dr__checkerboard(int test_num, int small_rank, int large_rank, i
CHECK(ret, FAIL, "H5Sselect_hyperslab");
} /* end if */
- /* Wierdness alert:
+ /* Weirdness alert:
*
* Again, it seems that selections can extend beyond the
* boundaries of the target dataspace -- hence the following
@@ -12126,7 +12126,7 @@ test_space_rebuild(void)
ret = FAIL;
CHECK(ret, FAIL, "H5S_hyper_rebuild");
}
- /* No need to do shape comparision */
+ /* No need to do shape comparison */
MESSAGE(7, ("Testing functionality to rebuild 2-D hyperslab selection\n"));
/* Create 2-D dataspace */
@@ -12218,7 +12218,7 @@ test_space_rebuild(void)
ret = FAIL;
CHECK(ret, FAIL, "H5S_hyper_rebuild");
}
- /* No need to do shape comparision */
+ /* No need to do shape comparison */
MESSAGE(7, ("Testing functionality to rebuild 3-D hyperslab selection\n"));
@@ -12320,7 +12320,7 @@ test_space_rebuild(void)
ret = FAIL;
CHECK(ret, FAIL, "H5S_hyper_rebuild");
}
- /* No need to do shape comparision */
+ /* No need to do shape comparison */
MESSAGE(7, ("Testing functionality to rebuild 4-D hyperslab selection\n"));
@@ -12439,7 +12439,7 @@ test_space_rebuild(void)
ret = FAIL;
CHECK(ret, FAIL, "H5S_hyper_rebuild");
}
- /* No need to do shape comparision */
+ /* No need to do shape comparison */
MESSAGE(7, ("Testing functionality to rebuild 5-D hyperslab selection\n"));
@@ -12568,7 +12568,7 @@ test_space_rebuild(void)
ret = FAIL;
CHECK(ret, FAIL, "H5S_hyper_rebuild");
}
- /* No need to do shape comparision */
+ /* No need to do shape comparison */
/* We use 5-D to test a special case with
rebuilding routine TRUE, FALSE and TRUE */
@@ -12615,7 +12615,7 @@ test_space_rebuild(void)
ret = FAIL;
CHECK(ret, FAIL, "H5S_hyper_rebuild");
}
- /* No need to do shape comparision */
+ /* No need to do shape comparison */
/* Adding some selections to make it real irregular */
start5[3] = 1;
@@ -12643,7 +12643,7 @@ test_space_rebuild(void)
ret = FAIL;
CHECK(ret, FAIL, "H5S_hyper_rebuild");
}
- /* No need to do shape comparision */
+ /* No need to do shape comparison */
/* Add more selections to make it regular again */
start5[3] = 5;
@@ -12671,7 +12671,7 @@ test_space_rebuild(void)
ret = FAIL;
CHECK(ret, FAIL, "H5S_hyper_rebuild");
}
- /* No need to do shape comparision */
+ /* No need to do shape comparison */
H5Sclose(sid_reg1);
CHECK(ret, FAIL, "H5Sclose");
@@ -12977,7 +12977,7 @@ test_space_update_diminfo(void)
CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
} /* end if */
- /* Add block parially overlapping first, with OR */
+ /* Add block partially overlapping first, with OR */
start1[0] = 4;
count1[0] = 1;
block1[0] = 2;
@@ -13022,7 +13022,7 @@ test_space_update_diminfo(void)
CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
} /* end if */
- /* Add equally sized block parially overlapping current, with XOR */
+ /* Add equally sized block partially overlapping current, with XOR */
start1[0] = 3;
count1[0] = 1;
block1[0] = 5;
@@ -13056,7 +13056,7 @@ test_space_update_diminfo(void)
CHECK(ret, FAIL, "H5S_hyper_rebuild");
} /* end if */
- /* Add differently sized block parially overlapping current, with XOR */
+ /* Add differently sized block partially overlapping current, with XOR */
start1[0] = 4;
count1[0] = 1;
block1[0] = 5;
@@ -14232,7 +14232,7 @@ test_hyper_regular(void)
**
****************************************************************/
static void
-test_hyper_unlim_check(hid_t sid, hsize_t *dims, hssize_t enpoints, hssize_t enblocks, hsize_t *eblock1,
+test_hyper_unlim_check(hid_t sid, hsize_t *dims, hssize_t endpoints, hssize_t enblocks, hsize_t *eblock1,
hsize_t *eblock2)
{
hid_t lim_sid;
@@ -14257,7 +14257,7 @@ test_hyper_unlim_check(hid_t sid, hsize_t *dims, hssize_t enpoints, hssize_t enb
/* Check number of elements */
npoints = H5Sget_select_npoints(lim_sid);
CHECK(npoints, FAIL, "H5Sget_select_npoints");
- VERIFY(npoints, enpoints, "H5Sget_select_npoints");
+ VERIFY(npoints, endpoints, "H5Sget_select_npoints");
/* Get selection type */
sel_type = H5Sget_select_type(lim_sid);
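
The tselect.c hunks revolve around hyperslab selections and element counts; a small sketch, with illustrative extents, of making a regular hyperslab selection and checking H5Sget_select_npoints():

#include "hdf5.h"

/* Make a regular 2-D hyperslab selection and check the element count:
 * count*block elements per dimension, 36 in total here. */
static int hyperslab_npoints_example(void)
{
    hsize_t  dims[2]   = {10, 10};
    hsize_t  start[2]  = {1, 1};
    hsize_t  stride[2] = {5, 5};
    hsize_t  count[2]  = {2, 2};
    hsize_t  block[2]  = {3, 3};
    hid_t    sid       = H5Screate_simple(2, dims, NULL);
    hssize_t npoints;

    H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
    npoints = H5Sget_select_npoints(sid); /* (2 * 3) * (2 * 3) = 36 */

    H5Sclose(sid);
    return npoints == 36;
}
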
diff --git a/test/tsohm.c b/test/tsohm.c
index 7ef283b..fdc3375 100644
--- a/test/tsohm.c
+++ b/test/tsohm.c
@@ -636,7 +636,7 @@ size1_helper(hid_t file, const char *filename, hid_t fapl_id, hbool_t test_file_
if (GetTestExpress() > 1)
test_file_closing = FALSE;
- /* Intialize wdata */
+ /* Initialize wdata */
HDmemset(&wdata, 0, sizeof(wdata));
wdata.i1 = 11;
HDstrcpy(wdata.str, "string");
@@ -649,7 +649,7 @@ size1_helper(hid_t file, const char *filename, hid_t fapl_id, hbool_t test_file_
wdata.i8 = 88;
wdata.f1 = 0.0F;
- /* Intialize rdata */
+ /* Initialize rdata */
HDmemset(&rdata, 0, sizeof(rdata));
dtype1_id = make_dtype_1();
@@ -762,7 +762,7 @@ error:
* Function: getsize_testsize1
*
* Purpose: Creates a test file, populates it, and returns its file size.
- * Oject header information from the "first" dataset in the file
+ * Object header information from the "first" dataset in the file
* is stored in pointer `oinfo`.
*
* Programmer: Jacob Smith
@@ -1551,7 +1551,7 @@ size2_dump_struct(const char *name, size2_helper_struct *sizes)
/*-------------------------------------------------------------------------
* Function: size2_helper
*
- * Purpose: A helper functon for test_sohm_size2.
+ * Purpose: A helper function for test_sohm_size2.
*
* Creates a file using the given fcpl, then creates lots
* of different kinds of messages within the file and
@@ -1931,7 +1931,7 @@ size2_helper(hid_t fcpl_id, int test_file_closing, size2_helper_struct *ret_size
/*-------------------------------------------------------------------------
* Function: size2_verify
*
- * Purpose: A helper functon to verify the file created by size2_helper.
+ * Purpose: A helper function to verify the file created by size2_helper.
*
* Runs various tests (not exhaustive) to ensure that the
* file FILENAME actually has the structure that size2_helper
diff --git a/test/ttsafe_attr_vlen.c b/test/ttsafe_attr_vlen.c
index 2fe140c..d105ffe 100644
--- a/test/ttsafe_attr_vlen.c
+++ b/test/ttsafe_attr_vlen.c
@@ -136,7 +136,7 @@ tts_attr_vlen_thread(void H5_ATTR_UNUSED *client_data)
gid = H5Gopen2(fid, "/", H5P_DEFAULT);
CHECK(gid, H5I_INVALID_HID, "H5Gopen");
- /* Open the attribte */
+ /* Open the attribute */
aid = H5Aopen(gid, "root_attr", H5P_DEFAULT);
CHECK(aid, H5I_INVALID_HID, "H5Aopen");
diff --git a/test/ttsafe_error.c b/test/ttsafe_error.c
index 7c92d65..ceaca25 100644
--- a/test/ttsafe_error.c
+++ b/test/ttsafe_error.c
@@ -114,7 +114,7 @@ tts_error(void)
H5TS_wait_for_thread(threads[i]);
if (error_flag_g) {
- TestErrPrintf("At least one thread reported a value that was different from the exected value\n");
+ TestErrPrintf("At least one thread reported a value that was different from the expected value\n");
HDprintf("(Update this test if the error stack changed!)\n");
}
diff --git a/test/tvltypes.c b/test/tvltypes.c
index 1b9bfad..9e4f685 100644
--- a/test/tvltypes.c
+++ b/test/tvltypes.c
@@ -131,7 +131,7 @@ test_vltypes_dataset_create(void)
dcpl = H5Pcreate(H5P_DATASET_CREATE);
CHECK(dcpl, FAIL, "H5Pcreate");
- /* Set fill value writting time to be NEVER */
+ /* Set fill value writing time to be NEVER */
ret = H5Pset_fill_time(dcpl, H5D_FILL_TIME_NEVER);
CHECK(ret, FAIL, "H5Pset_fill_time");
diff --git a/test/twriteorder.c b/test/twriteorder.c
index 9cff00a..c7984e7 100644
--- a/test/twriteorder.c
+++ b/test/twriteorder.c
@@ -54,7 +54,7 @@
*
* If the system, in which the writer and reader processes run, the readers
* will always get all chain-linked blocks correctly. If the order of write
- * is not maintained, some reader processes may found unexpect block data.
+ * is not maintained, some reader processes may find unexpected block data.
*
*************************************************************/
@@ -294,7 +294,7 @@ write_wo_file(void)
return -1;
}
- /* all writes done. return succeess. */
+ /* all writes done. return success. */
#ifdef DEBUG
HDprintf("wrote %d blocks\n", nlinkedblock_g);
#endif
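The twriteorder.c header comment above describes why write order matters for the chain-linked blocks that write_wo_file() produces: a reader may only follow a link once the block it points to is already in the file. As a rough, hypothetical illustration in plain stdio C (write_block() and append_linked_block() are invented names, not the test's actual code), the required ordering is:

    #include <stdio.h>

    /* Write LEN bytes at offset WHERE and make sure they reach the file. */
    static int
    write_block(FILE *fp, long where, const void *data, size_t len)
    {
        if (fseek(fp, where, SEEK_SET) != 0)
            return -1;
        if (fwrite(data, 1, len, fp) != len)
            return -1;
        return (fflush(fp) == 0) ? 0 : -1; /* block must be durable before it is linked */
    }

    /* Publish a new chain-linked block: data first, link second. */
    static int
    append_linked_block(FILE *fp, long prev_link_pos, long new_block_pos,
                        const void *data, size_t len)
    {
        /* 1) write the new block itself ... */
        if (write_block(fp, new_block_pos, data, len) < 0)
            return -1;
        /* 2) ... then update the previous block's link, so a reader that sees
         *    the link is guaranteed to find a complete block behind it */
        return write_block(fp, prev_link_pos, &new_block_pos, sizeof(new_block_pos));
    }

If the system reorders these two writes, a reader can observe the link before the block it points to, which is exactly the "unexpected block data" case the comment warns about.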
diff --git a/test/unlink.c b/test/unlink.c
index a154671..92cfab7 100644
--- a/test/unlink.c
+++ b/test/unlink.c
@@ -614,7 +614,7 @@ test_filespace(hid_t fapl)
/* Create common objects for datasets */
- /* Create dataset creation property list for contigous storage */
+ /* Create dataset creation property list for contiguous storage */
if ((contig_dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
TEST_ERROR
@@ -1096,7 +1096,7 @@ test_filespace(hid_t fapl)
PASSED();
- /* Create simple group hiearchy, remove it & verify file size */
+ /* Create simple group hierarchy, remove it & verify file size */
TESTING(" simple group hierarchy");
/* Create file */
@@ -1135,7 +1135,7 @@ test_filespace(hid_t fapl)
PASSED();
- /* Create complex group hiearchy, remove it & verify file size */
+ /* Create complex group hierarchy, remove it & verify file size */
TESTING(" complex group hierarchy");
/* Create file */
diff --git a/test/unregister.c b/test/unregister.c
index b2e1a57..9b72f2c 100644
--- a/test/unregister.c
+++ b/test/unregister.c
@@ -2,7 +2,7 @@
* Copyright by The HDF Group. *
* All rights reserved. *
* *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
* the COPYING file, which can be found at the root of the source code *
* distribution tree, or in https://www.hdfgroup.org/licenses. *
diff --git a/test/use_common.c b/test/use_common.c
index 8f78f3b..7b5df53 100644
--- a/test/use_common.c
+++ b/test/use_common.c
@@ -298,7 +298,7 @@ write_uc_file(hbool_t tosend, hid_t file_id, options_t *opts)
return -1;
}
- /* verify chunk_dims against set paramenters */
+ /* verify chunk_dims against set parameters */
if (chunk_dims[0] != opts->chunkdims[0] || chunk_dims[1] != cz || chunk_dims[2] != cz) {
HDfprintf(stderr, "chunk size is not as expected. Got dims=(%llu,%llu,%llu)\n",
(unsigned long long)chunk_dims[0], (unsigned long long)chunk_dims[1],
@@ -533,7 +533,7 @@ read_uc_file(hbool_t towait, options_t *opts)
/* quit when all nplanes have been read */
loops_waiting_for_plane = 0;
while (nplanes_seen < opts->nplanes) {
- /* print progress message according to if new planes are availalbe */
+ /* print progress message according to whether new planes are available */
if (nplanes_seen < dims[0]) {
if (loops_waiting_for_plane) {
/* end the previous message */
diff --git a/test/use_disable_mdc_flushes.c b/test/use_disable_mdc_flushes.c
index d1b693b..ea674ec 100644
--- a/test/use_disable_mdc_flushes.c
+++ b/test/use_disable_mdc_flushes.c
@@ -360,7 +360,7 @@ write_file(void)
return -1;
}
- /* verify chunk_dims against set paramenters */
+ /* verify chunk_dims against set parameters */
if (chunk_dims[0] != chunkdims_g[0] || chunk_dims[1] != cz || chunk_dims[2] != cz) {
HDfprintf(stderr, "chunk size is not as expected. Got dims=(%llu,%llu,%llu)\n",
(unsigned long long)chunk_dims[0], (unsigned long long)chunk_dims[1],
diff --git a/test/vds.c b/test/vds.c
index a15d0fc..11e6327 100644
--- a/test/vds.c
+++ b/test/vds.c
@@ -196,7 +196,7 @@ vds_select_equal(hid_t space1, hid_t space2)
if (nblocks1 != nblocks2)
return FALSE;
- /* Allocate block lists. Do not return directly afer
+ /* Allocate block lists. Do not return directly after
* allocating, to make sure buffers are freed. */
if (NULL ==
(buf1 = (hsize_t *)HDmalloc((size_t)2 * (size_t)rank1 * (size_t)nblocks1 * sizeof(*buf1))))
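The vds_select_equal() comment above ("Do not return directly after allocating, to make sure buffers are freed") is shorthand for the usual single-exit cleanup idiom in C. A minimal sketch under assumed names (compare_two_block_lists() and its buffers are illustrative, not the test's real code):

    #include <stdlib.h>
    #include <string.h>

    static int
    compare_two_block_lists(size_t nelem)
    {
        int   ret  = -1;   /* assume failure until the comparison succeeds */
        long *buf1 = NULL;
        long *buf2 = NULL;

        if (NULL == (buf1 = (long *)malloc(nelem * sizeof(*buf1))))
            goto done;
        if (NULL == (buf2 = (long *)malloc(nelem * sizeof(*buf2))))
            goto done;

        /* ... fill buf1 and buf2 from the two selections here ... */
        memset(buf1, 0, nelem * sizeof(*buf1));
        memset(buf2, 0, nelem * sizeof(*buf2));

        ret = (0 == memcmp(buf1, buf2, nelem * sizeof(*buf1))) ? 1 : 0;

    done:
        free(buf1); /* free(NULL) is a no-op, so an early exit is still safe */
        free(buf2);
        return ret;
    }

Routing every exit through the one cleanup label is what lets the second allocation fail without leaking the first buffer.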
@@ -479,7 +479,7 @@ test_api_get_ex_dcpl(test_api_config_t config, hid_t fapl, hid_t dcpl, hid_t *ex
/* Verify examination DCPL is equal to original DCPL. Do not compare the
* plist to itself, and do not do the comparison if we reopened the file,
- * because in that case the extent of the source dset will not be corrent.
+ * because in that case the extent of the source dset will not be current.
*/
if ((*ex_dcpl != dcpl) && (config != TEST_API_REOPEN_FILE)) {
if ((tri_ret = H5Pequal(dcpl, *ex_dcpl)) < 0)
@@ -1044,7 +1044,7 @@ test_api(test_api_config_t config, hid_t fapl)
if ((vspace[i] = H5Screate_simple(2, dims, NULL)) < 0)
TEST_ERROR
- /* Select row in virual dataspace */
+ /* Select row in virtual dataspace */
start[0] = (hsize_t)i;
if (H5Sselect_hyperslab(vspace[i], H5S_SELECT_SET, start, NULL, count, block) < 0)
TEST_ERROR
@@ -1181,7 +1181,7 @@ test_vds_prefix_first(unsigned config, hid_t vds_fapl, hid_t src_fapl)
hid_t srcspace[4] = {-1, -1, -1, -1}; /* Source dataspaces */
hid_t vspace[4] = {-1, -1, -1, -1}; /* Virtual dset dataspaces */
hid_t memspace = -1; /* Memory dataspace */
- hid_t srcdset[4] = {-1, -1, -1, -1}; /* Source datsets */
+ hid_t srcdset[4] = {-1, -1, -1, -1}; /* Source datasets */
hid_t vdset = -1; /* Virtual dataset */
hsize_t dims[4] = {10, 26, 0, 0}; /* Data space current size */
int buf[10][26]; /* Write and expected read buffer */
@@ -1460,7 +1460,7 @@ test_basic_io(unsigned config, hid_t vds_fapl, hid_t src_fapl)
hid_t srcspace[4] = {-1, -1, -1, -1}; /* Source dataspaces */
hid_t vspace[4] = {-1, -1, -1, -1}; /* Virtual dset dataspaces */
hid_t memspace = -1; /* Memory dataspace */
- hid_t srcdset[4] = {-1, -1, -1, -1}; /* Source datsets */
+ hid_t srcdset[4] = {-1, -1, -1, -1}; /* Source datasets */
hid_t vdset = -1; /* Virtual dataset */
hsize_t dims[4] = {10, 26, 0, 0}; /* Data space current size */
hsize_t start[4]; /* Hyperslab start */
@@ -3659,7 +3659,7 @@ test_unlim(unsigned config, hid_t vds_fapl, hid_t src_fapl)
hid_t vspace[4] = {-1, -1, -1, -1}; /* Virtual dset dataspaces */
hid_t memspace = -1; /* Memory dataspace */
hid_t filespace = -1; /* File dataspace */
- hid_t srcdset[4] = {-1, -1, -1, -1}; /* Source datsets */
+ hid_t srcdset[4] = {-1, -1, -1, -1}; /* Source datasets */
hid_t vdset = -1; /* Virtual dataset */
hsize_t dims[2] = {10, 10}; /* Data space current size */
hsize_t mdims[2] = {10, 20}; /* Data space maximum size */
@@ -6673,7 +6673,7 @@ test_printf(unsigned config, hid_t vds_fapl, hid_t src_fapl)
hid_t vspace[2] = {-1, -1}; /* Virtual dset dataspaces */
hid_t memspace = -1; /* Memory dataspace */
hid_t filespace = -1; /* File dataspace */
- hid_t srcdset[6] = {-1, -1, -1, -1, -1, -1}; /* Source datsets */
+ hid_t srcdset[6] = {-1, -1, -1, -1, -1, -1}; /* Source datasets */
hid_t vdset = -1; /* Virtual dataset */
hsize_t dims[2] = {10, 0}; /* Data space current size */
hsize_t mdims[2] = {10, 20}; /* Data space maximum size */
@@ -10381,7 +10381,7 @@ test_all(unsigned config, hid_t vds_fapl, hid_t src_fapl)
hid_t vspace[3] = {-1, -1, -1}; /* Virtual dset dataspaces */
hid_t memspace = -1; /* Memory dataspace */
hid_t filespace = -1; /* File dataspace */
- hid_t srcdset[5] = {-1, -1, -1, -1, -1}; /* Source datsets */
+ hid_t srcdset[5] = {-1, -1, -1, -1, -1}; /* Source datasets */
hid_t vdset = -1; /* Virtual dataset */
hsize_t dims[2] = {6, 6}; /* Data space current size */
hsize_t mdims[2] = {10, 10}; /* Data space maximum size */
diff --git a/test/vds_env.c b/test/vds_env.c
index b8d8bbd..88b649e 100644
--- a/test/vds_env.c
+++ b/test/vds_env.c
@@ -58,7 +58,7 @@ test_vds_prefix_second(unsigned config, hid_t fapl)
hid_t srcspace[4] = {-1, -1, -1, -1}; /* Source dataspaces */
hid_t vspace[4] = {-1, -1, -1, -1}; /* Virtual dset dataspaces */
hid_t memspace = -1; /* Memory dataspace */
- hid_t srcdset[4] = {-1, -1, -1, -1}; /* Source datsets */
+ hid_t srcdset[4] = {-1, -1, -1, -1}; /* Source datasets */
hid_t vdset = -1; /* Virtual dataset */
hsize_t dims[4] = {10, 26, 0, 0}; /* Data space current size */
int buf[10][26]; /* Write and expected read buffer */