summaryrefslogtreecommitdiffstats
path: root/test
diff options
context:
space:
mode:
authorSongyu Lu <songyulu@hdfgroup.org>2021-11-01 15:57:55 (GMT)
committerSongyu Lu <songyulu@hdfgroup.org>2021-11-01 15:57:55 (GMT)
commitad76c0de4e6502fd82d1a105271e8b680c348e85 (patch)
tree01bc3f09a43b08ccb3835b61420f135f1d5dda80 /test
parent572201b560b87fb0011dea1d3257fe0351483e8c (diff)
downloadhdf5-ad76c0de4e6502fd82d1a105271e8b680c348e85.zip
hdf5-ad76c0de4e6502fd82d1a105271e8b680c348e85.tar.gz
hdf5-ad76c0de4e6502fd82d1a105271e8b680c348e85.tar.bz2
Two purposes for this PR:
1. Added an option to enable the legacy SWMR in vfd_swmr_bigset_writer.c. 2. Adjusted the options for the big set test to make sure it passes the exhaustive test in testvfdswmr.sh.in.
Diffstat (limited to 'test')
-rw-r--r--test/testvfdswmr.sh.in34
-rw-r--r--test/vfd_swmr_bigset_writer.c66
2 files changed, 59 insertions, 41 deletions
diff --git a/test/testvfdswmr.sh.in b/test/testvfdswmr.sh.in
index 4dc1e82..535aafa 100644
--- a/test/testvfdswmr.sh.in
+++ b/test/testvfdswmr.sh.in
@@ -1047,31 +1047,31 @@ fi
#
BIGSET_n=25 # -n option: # of iterations
BIGSET_few_s=10 # -s option: # of datasets (for few_big test)
-BIGSET_many_s=100 # -s option: # of datasets (for many_small test)
+BIGSET_many_s=50 # -s option: # of datasets (for many_small test)
#
#
# Setting for exhaustive and quick runs
#
if [[ "$HDF5TestExpress" -eq 0 ]] ; then # exhaustive run
- BIGSET_n=200
- BIGSET_few_s=100
- BIGSET_many_s=1000
+ BIGSET_n=100
+ BIGSET_few_s=25
+ BIGSET_many_s=100
elif [[ "$HDF5TestExpress" -gt 1 ]]; then # quick run
BIGSET_n=10
BIGSET_few_s=3
- BIGSET_many_s=50
+ BIGSET_many_s=25
fi
#
#
-for options in "-d 1" "-d 1 -F" "-d 2" "-d 2 -F" "-d 1 -t" "-d 1 -t -F" "-d 1 -t -R" "-d 1 -V" "-d 1 -M" "-d 1 -V -F" "-d 1 -M -F"; do
+for options in "-d 1" "-d 1 -F" "-d 2 -l 16" "-d 2 -F -l 16" "-d 1 -t" "-d 1 -t -F" "-d 1 -t -R" "-d 1 -V" "-d 1 -M" "-d 1 -V -F" "-d 1 -M -F"; do
if [ ${do_many_small:-no} = no ]; then
continue
fi
#
- # Test many small datasets of one and two dimensions.
+ # Test many small datasets of two or three dimensions.
#
# Perform 25 iterations on 100 extensible datasets configured with
- # 2D 16x16 chunks or 3D 8x16x16 chunks of 32-bit unsigned integer elements,
+ # 2D 16x16 chunks or 3D 1x16x16 chunks of 32-bit unsigned integer elements,
# expanding each dataset by a chunk in one dimension (up to 25x1
# 16x16 chunks) on each iteration.
#
@@ -1079,16 +1079,16 @@ for options in "-d 1" "-d 1 -F" "-d 2" "-d 2 -F" "-d 1 -t" "-d 1 -t -F" "-d 1 -t
# in *two* dimensions (up to 25x25 16x16 chunks).
#
# If testing 3D datasets (-t option), extending each dataset along the
- # first dimension (up to 25 8x16x16)
+ # first dimension (up to 25 1x16x16)
#
echo launch vfd_swmr_bigset_writer many small, options $options
catch_out_err_and_rc vfd_swmr_bigset_writer \
- ../vfd_swmr_bigset_writer -n $BIGSET_n $options -s $BIGSET_many_s -e 8 -r 16 -c 16 -q -l 3 &
+ ../vfd_swmr_bigset_writer -n $BIGSET_n $options -s $BIGSET_many_s -e 1 -r 16 -c 16 -q &
pid_writer=$!
catch_out_err_and_rc vfd_swmr_bigset_reader \
- ../vfd_swmr_bigset_reader -n $BIGSET_n $options -s $BIGSET_many_s -e 8 -r 16 -c 16 -q -l 3 &
+ ../vfd_swmr_bigset_reader -n $BIGSET_n $options -s $BIGSET_many_s -e 1 -r 16 -c 16 -q &
pid_reader=$!
# Wait for the reader to finish before signalling the
@@ -1116,12 +1116,12 @@ for options in "-d 1" "-d 1 -F" "-d 2" "-d 2 -F" "-d 1 -t" "-d 1 -t -F" "-d 1 -t
done
# bigset test for bigger chunks
-for options in "-d 1" "-d 1 -F" "-d 2" "-d 2 -F" "-d 1 -t" "-d 1 -t -F" "-d 1 -t -R" "-d 1 -V" "-d 1 -M" "-d 1 -V -F" "-d 1 -M -F"; do
+for options in "-d 1" "-d 1 -F" "-d 2 -l 10" "-d 2 -F -l 10" "-d 1 -t -l 10" "-d 1 -t -F -l 10" "-d 1 -t -R" "-d 1 -V" "-d 1 -M" "-d 1 -V -F" "-d 1 -M -F"; do
#
- # Test a few big datasets of one and two dimensions.
+ # Test a few big datasets of two or three dimensions.
#
# Perform 25 iterations on 10 extensible datasets configured with
- # 2D 256x256 chunks or 3D 64x256x256 of 32-bit unsigned integer elements,
+ # 2D 256x256 chunks or 3D 8x256x256 of 32-bit unsigned integer elements,
# expanding each dataset by a chunk in one dimension (up to 25x1
# 256x256 chunks) on each iteration.
#
@@ -1129,7 +1129,7 @@ for options in "-d 1" "-d 1 -F" "-d 2" "-d 2 -F" "-d 1 -t" "-d 1 -t -F" "-d 1 -t
# in *two* dimensions (up to 25x25 256x256 chunks).
#
# If testing 3D datasets (-t option), extending each dataset along the
- # first dimension (up to 25 64x256x256)
+ # first dimension (up to 25 8x256x256)
#
if [ ${do_few_big:-no} = no ]; then
@@ -1137,11 +1137,11 @@ for options in "-d 1" "-d 1 -F" "-d 2" "-d 2 -F" "-d 1 -t" "-d 1 -t -F" "-d 1 -t
fi
echo launch vfd_swmr_bigset_writer few big, options $options ......may take some time......
catch_out_err_and_rc vfd_swmr_bigset_writer \
- ../vfd_swmr_bigset_writer -n $BIGSET_n $options -s $BIGSET_few_s -e 64 -r 256 -c 256 -q -l 3 &
+ ../vfd_swmr_bigset_writer -n $BIGSET_n $options -s $BIGSET_few_s -e 8 -r 256 -c 256 -q &
pid_writer=$!
catch_out_err_and_rc vfd_swmr_bigset_reader \
- ../vfd_swmr_bigset_reader -n $BIGSET_n $options -s $BIGSET_few_s -e 64 -r 256 -c 256 -q -l 3 &
+ ../vfd_swmr_bigset_reader -n $BIGSET_n $options -s $BIGSET_few_s -e 8 -r 256 -c 256 -q &
pid_reader=$!
# Wait for the reader to finish before signalling the
diff --git a/test/vfd_swmr_bigset_writer.c b/test/vfd_swmr_bigset_writer.c
index 956973a..123efbd 100644
--- a/test/vfd_swmr_bigset_writer.c
+++ b/test/vfd_swmr_bigset_writer.c
@@ -149,6 +149,7 @@ typedef struct {
bool test_3d;
enum { vds_off, vds_single, vds_multi } vds;
bool use_vfd_swmr;
+ bool use_legacy_swmr;
bool use_named_pipe;
bool do_perf;
bool cross_chunk_read;
@@ -214,6 +215,7 @@ state_initializer(void)
.test_3d = false,
.vds = vds_off,
.use_vfd_swmr = true,
+ .use_legacy_swmr = false,
.use_named_pipe = true,
.do_perf = false,
.cross_chunk_read = false,
@@ -256,6 +258,7 @@ usage(const char *progname)
"-P: do the performance measurement\n"
"-R: flush raw data\n"
"-S: do not use VFD SWMR\n"
+ "-T: use legacy SWMR (-S and -N must also be specified)\n"
"-V: use virtual datasets and a single\n"
" source file\n"
"-a steps: `steps` between adding attributes\n"
@@ -339,7 +342,7 @@ state_init(state_t *s, int argc, char **argv)
if (tfile)
HDfree(tfile);
- while ((ch = getopt(argc, argv, "CFMNPRSVa:bc:d:e:f:g:j:k:l:m:n:o:p:qr:s:tu:v:w:")) != -1) {
+ while ((ch = getopt(argc, argv, "CFMNPRSTVa:bc:d:e:f:g:j:k:l:m:n:o:p:qr:s:tu:v:w:")) != -1) {
switch (ch) {
case 'C':
/* This flag indicates cross-over chunk read during data validation */
@@ -362,6 +365,9 @@ state_init(state_t *s, int argc, char **argv)
case 'S':
s->use_vfd_swmr = false;
break;
+ case 'T':
+ s->use_legacy_swmr = true;
+ break;
case 'V':
s->vds = vds_single;
break;
@@ -745,6 +751,18 @@ state_init(state_t *s, int argc, char **argv)
TEST_ERROR;
}
+ if (s->use_legacy_swmr) {
+ if (s->use_vfd_swmr) {
+ HDfprintf(stderr, "Can't use both VFD SWMR and Legacy SWMR\n");
+ TEST_ERROR;
+ }
+
+ if (s->use_named_pipe) {
+ HDfprintf(stderr, "Can't use named pipe for the Legacy SWMR\n");
+ TEST_ERROR;
+ }
+ }
+
return true;
error:
@@ -826,14 +844,14 @@ state_destroy(state_t *s)
if (s->vds != vds_multi) {
if (H5Fvfd_swmr_end_tick(s->file[0]) < 0) {
- HDfprintf(stderr, "H5Fclose failed\n");
+ HDfprintf(stderr, "H5Fvfd_swmr_end_tick failed\n");
TEST_ERROR;
}
}
else {
for (j = 0; j < NELMTS(s->file); j++)
if (H5Fvfd_swmr_end_tick(s->file[j]) < 0) {
- HDfprintf(stderr, "H5Fclose failed\n");
+ HDfprintf(stderr, "H5Fvfd_swmr_end_tick failed\n");
TEST_ERROR;
}
}
@@ -869,7 +887,7 @@ state_destroy(state_t *s)
}
HDfprintf(stdout, "File close time (for running the writer alone) = %lf seconds\n",
- TIME_PASSED(start_time, end_time));
+ TIME_PASSED(start_time, end_time));
}
if (s->dataset)
@@ -1407,9 +1425,8 @@ open_extensible_dset(state_t *s)
if (s->test_3d) {
if (maxdims3[0] != three_dee_max_dims[0] || maxdims3[1] != three_dee_max_dims[1] ||
maxdims3[2] != three_dee_max_dims[2]) {
- HDfprintf(stderr,
- "Unexpected maximum dimensions %" PRIuHSIZE " x %" PRIuHSIZE " x %" PRIuHSIZE,
- maxdims3[0], maxdims3[1], maxdims3[2]);
+ HDfprintf(stderr, "Unexpected maximum dimensions %" PRIuHSIZE " x %" PRIuHSIZE " x %" PRIuHSIZE,
+ maxdims3[0], maxdims3[1], maxdims3[2]);
TEST_ERROR;
}
}
@@ -1417,17 +1434,17 @@ open_extensible_dset(state_t *s)
if (s->expand_2d) {
if (maxdims2[0] != two_dee_max_dims[0] || maxdims2[1] != two_dee_max_dims[1] ||
maxdims2[0] != maxdims2[1]) {
- HDfprintf(stderr, "Unexpected maximum dimensions %" PRIuHSIZE " x %" PRIuHSIZE,
- maxdims2[0], maxdims2[1]);
+ HDfprintf(stderr, "Unexpected maximum dimensions %" PRIuHSIZE " x %" PRIuHSIZE, maxdims2[0],
+ maxdims2[1]);
TEST_ERROR;
}
}
else if (maxdims2[0] != s->one_dee_max_dims[0] || maxdims2[1] != s->one_dee_max_dims[1] ||
dims2[0] != s->chunk_dims[0]) {
HDfprintf(stderr,
- "Unexpected maximum dimensions %" PRIuHSIZE " x %" PRIuHSIZE
- " or columns %" PRIuHSIZE,
- maxdims2[0], maxdims2[1], dims2[1]);
+ "Unexpected maximum dimensions %" PRIuHSIZE " x %" PRIuHSIZE
+ " or columns %" PRIuHSIZE,
+ maxdims2[0], maxdims2[1], dims2[1]);
}
}
@@ -1479,7 +1496,7 @@ create_dsets(state_t s)
}
HDfprintf(stdout, "Dataset creation time (for running the writer alone) = %lf seconds\n",
- TIME_PASSED(start_time, end_time));
+ TIME_PASSED(start_time, end_time));
}
return true;
@@ -2081,7 +2098,7 @@ verify_dsets(state_t s, np_state_t *np, mat_t *mat)
* the validation of the chunks */
if (s.use_named_pipe && below_speed_limit(&(last.time), &(s.ival))) {
AT();
- HDfprintf(stderr, "verify_extensible_dset took too long to finish\n");
+ HDfprintf(stderr, "Warning: verify_extensible_dset took too long to finish\n");
}
/* For checking the time lapse between the writer's finishing writing a batch of chunks
@@ -2109,7 +2126,7 @@ verify_dsets(state_t s, np_state_t *np, mat_t *mat)
/* Print out the performance information */
if (s.use_named_pipe && s.do_perf && counter)
HDfprintf(stdout, "Dataset verification: mean time = %lf, max time = %lf, min time = %lf\n",
- total_time / (double)counter, max_time, min_time);
+ total_time / (double)counter, max_time, min_time);
return true;
@@ -2396,18 +2413,13 @@ write_dsets(state_t s, np_state_t *np, mat_t *mat)
/* Calculate the write speed */
if (s.test_3d)
- throughput =
- ((double)(sizeof(unsigned int) * s.depth * s.rows * s.cols * s.nsteps * s.ndatasets)) /
- time_passed;
+ throughput = ((double)(sizeof(unsigned int) * s.depth * s.rows * s.cols * s.nsteps * s.ndatasets)) / time_passed;
else
- throughput =
- ((double)(sizeof(unsigned int) * s.rows * s.cols * s.nsteps * s.ndatasets)) / time_passed;
+ throughput = ((double)(sizeof(unsigned int) * s.rows * s.cols * s.nsteps * s.ndatasets)) / time_passed;
/* Print out the performance information */
- HDfprintf(stdout,
- "Dataset write time (for running the writer alone) = %lf seconds, write speed = %.2lf "
- "bytes/second\n",
- time_passed, throughput);
+ HDfprintf(stdout, "Dataset write time (for running the writer alone) = %lf seconds, write speed = %.2lf bytes/second\n",
+ time_passed, throughput);
}
return true;
@@ -2544,6 +2556,12 @@ main(int argc, char **argv)
TEST_ERROR;
}
+ /* Enable the Legacy SWMR writing mode if specified */
+ if (s.use_legacy_swmr && H5Fstart_swmr_write(s.file[0]) < 0) {
+ HDfprintf(stderr, "failed to start the Legacy SWMR writing mode\n");
+ TEST_ERROR;
+ }
+
/* Start to write chunks. The writer writes as many chunks as possible within a tick, then
* notify the reader. But it doesn't receive back the reader's notice. */
if (!write_dsets(s, &np, mat)) {