Diffstat (limited to 'test')
-rw-r--r--  test/AtomicWriterReader.txt | 48
-rw-r--r--  test/CMakeLists.txt | 69
-rw-r--r--  test/CMakeTests.cmake | 18
-rw-r--r--  test/Makefile.am | 47
-rw-r--r--  test/POSIX_Order_Write_Test_Report.docx | bin 0 -> 145445 bytes
-rw-r--r--  test/POSIX_Order_Write_Test_Report.pdf | bin 0 -> 84166 bytes
-rw-r--r--  test/SWMR_POSIX_Order_UG.txt | 94
-rw-r--r--  test/SWMR_UseCase_UG.txt | 223
-rw-r--r--  test/atomic_reader.c | 363
-rw-r--r--  test/atomic_writer.c | 245
-rw-r--r--  test/gen_idx.c | 126
-rw-r--r--  test/swmr_addrem_writer.c | 443
-rw-r--r--  test/swmr_check_compat_vfd.c | 5
-rw-r--r--  test/swmr_common.c | 290
-rw-r--r--  test/swmr_common.h | 80
-rw-r--r--  test/swmr_generator.c | 386
-rw-r--r--  test/swmr_reader.c | 549
-rw-r--r--  test/swmr_remove_reader.c | 519
-rw-r--r--  test/swmr_remove_writer.c | 381
-rw-r--r--  test/swmr_sparse_reader.c | 449
-rw-r--r--  test/swmr_sparse_writer.c | 454
-rw-r--r--  test/swmr_start_write.c | 713
-rw-r--r--  test/swmr_writer.c | 450
-rw-r--r--  test/test_usecases.sh.in | 170
-rw-r--r--  test/testswmr.sh.in | 529
-rw-r--r--  test/testvdsswmr.sh.in | 199
-rw-r--r--  test/twriteorder.c | 463
-rw-r--r--  test/use.h | 64
-rw-r--r--  test/use_append_chunk.c | 233
-rw-r--r--  test/use_append_mchunks.c | 226
-rw-r--r--  test/use_common.c | 653
-rw-r--r--  test/use_disable_mdc_flushes.c | 549
-rw-r--r--  test/vds_swmr.h | 165
-rw-r--r--  test/vds_swmr_gen.c | 178
-rw-r--r--  test/vds_swmr_reader.c | 142
-rw-r--r--  test/vds_swmr_writer.c | 167
36 files changed, 9675 insertions, 15 deletions
diff --git a/test/AtomicWriterReader.txt b/test/AtomicWriterReader.txt
new file mode 100644
index 0000000..dc0a3bd
--- /dev/null
+++ b/test/AtomicWriterReader.txt
@@ -0,0 +1,48 @@
+Atomic Tests Instructions
+=========================
+
+Purpose:
+--------
+This document describes how to build and run the Atomic Writer and Reader tests.
+The atomic test verifies whether atomic read-write operations work on a system.
+The two programs are atomic_writer.c and atomic_reader.c:
+atomic_writer.c is the "write" part of the test, and
+atomic_reader.c is the "read" part of the test.
+
+Building the Tests
+------------------
+The two test programs are automatically built during the configure and make
+process.  To build them individually, run the following in the test/ directory:
+$ gcc -o atomic_writer atomic_writer.c
+$ gcc -o atomic_reader atomic_reader.c
+
+Running the Tests
+-----------------
+$ atomic_writer -n <number of integers to write> -i <number of iterations for writer>
+$ atomic_reader -n <number of integers to read> -i <number of iterations for reader>
+
+Note**
+(1) "atomic_data" is the data file, in the current directory, used by both
+ the writer and the reader.
+(2) The value for -n should be the same for both the writer and the reader.
+(3) The values for options n and i should be positive integers.
+(4) For this version, the user has to provide both options -n and -i to run
+ the writer and the reader.
+(5) If the user wants to run the writer for a long time, just provide a
+ large number for -i.
+
+Examples
+--------
+$ ./atomic_writer -n 10000 -i 5
+ Try to atomically write the pattern of 10000 integers 10000 times, and
+ iterate the whole write process 5 times.
+
+$ ./atomic_reader -n 10000 -i 2
+ Try to atomically read the pattern of 10000 integers 10000 times, and
+ iterate the whole read process 2 times.
+ A summary is printed at the end of each iteration. If all atomic reads are
+ correct, it will not show any reads beyond "0 re-tries"; that is, all reads
+ succeeded on the first attempt.
+
+Remark:
+You usually want the writer to iterate more times than the reader so that
+the writing will not finish before reading is done.
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index bd73c84..1e3ab0f 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -20,11 +20,13 @@ set (TEST_LIB_SOURCES
${HDF5_TEST_SOURCE_DIR}/h5test.c
${HDF5_TEST_SOURCE_DIR}/testframe.c
${HDF5_TEST_SOURCE_DIR}/cache_common.c
+ ${HDF5_TEST_SOURCE_DIR}/swmr_common.c
)
set (TEST_LIB_HEADERS
${HDF5_TEST_SOURCE_DIR}/h5test.h
${HDF5_TEST_SOURCE_DIR}/cache_common.h
+ ${HDF5_TEST_SOURCE_DIR}/swmr_common.h
)
add_library (${HDF5_TEST_LIB_TARGET} STATIC ${TEST_LIB_SOURCES} ${TEST_LIB_HEADERS})
@@ -223,6 +225,7 @@ set (H5_TESTS
objcopy
links
unlink
+ twriteorder
big
mtime
fillval
@@ -256,13 +259,32 @@ foreach (test ${H5_TESTS})
endforeach ()
set (H5_SWMR_TESTS
+ swmr_addrem_writer
swmr_check_compat_vfd
+ swmr_generator
+ swmr_reader
+ swmr_remove_reader
+ swmr_remove_writer
+ swmr_sparse_reader
+ swmr_sparse_writer
+ swmr_start_write
+ swmr_writer
)
foreach (test ${H5_SWMR_TESTS})
ADD_H5_EXE(${test})
endforeach ()
+set (H5_VDS_SWMR_TESTS
+ vds_swmr_gen
+ vds_swmr_reader
+ vds_swmr_writer
+)
+
+foreach (test ${H5_VDS_SWMR_TESTS})
+ ADD_H5_EXE(${test})
+endforeach (test ${H5_VDS_SWMR_TESTS})
+
##############################################################################
##############################################################################
### A D D I T I O N A L T E S T S ###
@@ -326,6 +348,8 @@ set (H5_CHECK_TESTS
err_compat
tcheck_version
testmeta
+ atomic_writer
+ atomic_reader
links_env
flushrefresh
)
@@ -378,4 +402,49 @@ else ()
set_target_properties (plugin PROPERTIES FOLDER test)
endif ()
+##############################################################################
+### U S E C A S E S T E S T S
+##############################################################################
+set (use_append_chunk_SOURCES ${HDF5_TEST_SOURCE_DIR}/use_append_chunk.c ${HDF5_TEST_SOURCE_DIR}/use_common.c)
+add_executable (use_append_chunk ${use_append_chunk_SOURCES})
+TARGET_NAMING (use_append_chunk STATIC)
+TARGET_C_PROPERTIES (use_append_chunk STATIC " " " ")
+target_link_libraries (use_append_chunk ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
+set_target_properties (use_append_chunk PROPERTIES FOLDER test)
+if (BUILD_SHARED_LIBS)
+ add_executable (use_append_chunk-shared ${use_append_chunk_SOURCES})
+ TARGET_NAMING (use_append_chunk-shared SHARED)
+ TARGET_C_PROPERTIES (use_append_chunk-shared SHARED " " " ")
+ target_link_libraries (use_append_chunk-shared ${HDF5_TEST_LIBSH_TARGET} ${HDF5_LIBSH_TARGET})
+ set_target_properties (use_append_chunk-shared PROPERTIES FOLDER test)
+endif ()
+
+set (use_append_mchunks_SOURCES ${HDF5_TEST_SOURCE_DIR}/use_append_mchunks.c ${HDF5_TEST_SOURCE_DIR}/use_common.c)
+add_executable (use_append_mchunks ${use_append_mchunks_SOURCES})
+TARGET_NAMING (use_append_mchunks STATIC)
+TARGET_C_PROPERTIES (use_append_mchunks STATIC " " " ")
+target_link_libraries (use_append_mchunks ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
+set_target_properties (use_append_mchunks PROPERTIES FOLDER test)
+if (BUILD_SHARED_LIBS)
+ add_executable (use_append_mchunks-shared ${use_append_mchunks_SOURCES})
+ TARGET_NAMING (use_append_mchunks-shared SHARED)
+ TARGET_C_PROPERTIES (use_append_mchunks-shared SHARED " " " ")
+ target_link_libraries (use_append_mchunks-shared ${HDF5_TEST_LIBSH_TARGET} ${HDF5_LIBSH_TARGET})
+ set_target_properties (use_append_mchunks-shared PROPERTIES FOLDER test)
+endif ()
+
+set (use_disable_mdc_flushes_SOURCES ${HDF5_TEST_SOURCE_DIR}/use_disable_mdc_flushes.c)
+add_executable (use_disable_mdc_flushes ${use_disable_mdc_flushes_SOURCES})
+TARGET_NAMING (use_disable_mdc_flushes STATIC)
+TARGET_C_PROPERTIES (use_disable_mdc_flushes STATIC " " " ")
+target_link_libraries (use_disable_mdc_flushes ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
+set_target_properties (use_disable_mdc_flushes PROPERTIES FOLDER test)
+if (BUILD_SHARED_LIBS)
+ add_executable (use_disable_mdc_flushes-shared ${use_disable_mdc_flushes_SOURCES})
+ TARGET_NAMING (use_disable_mdc_flushes-shared SHARED)
+ TARGET_C_PROPERTIES (use_disable_mdc_flushes-shared SHARED " " " ")
+ target_link_libraries (use_disable_mdc_flushes-shared ${HDF5_TEST_LIBSH_TARGET} ${HDF5_LIBSH_TARGET})
+ set_target_properties (use_disable_mdc_flushes-shared PROPERTIES FOLDER test)
+endif ()
+
include (CMakeTests.cmake)
diff --git a/test/CMakeTests.cmake b/test/CMakeTests.cmake
index 20f43bc..495ea2c 100644
--- a/test/CMakeTests.cmake
+++ b/test/CMakeTests.cmake
@@ -364,6 +364,10 @@ set (test_CLEANFILES
layout_extend.h5
zero_chunk.h5
chunk_single.h5
+ swmr_non_latest.h5
+ earray_hdr_fd.h5
+ farray_hdr_fd.h5
+ bt2_hdr_fd.h5
storage_size.h5
dls_01_strings.h5
extend.h5
@@ -457,6 +461,7 @@ set (test_CLEANFILES
tvltypes.h5
tvlstr.h5
tvlstr2.h5
+ twriteorder.dat
flush.h5
flush-swmr.h5
noflush.h5
@@ -532,16 +537,24 @@ set (test_CLEANFILES
vds_dapl.h5
vds_src_0.h5
vds_src_1.h5
+ swmr_data.h5
+ use_append_chunk.h5
+ use_append_mchunks.h5
+ use_disable_mdc_flushes.h5
tbogus.h5.copy
flushrefresh.h5
flushrefresh_VERIFICATION_START
flushrefresh_VERIFICATION_CHECKPOINT1
flushrefresh_VERIFICATION_CHECKPOINT2
flushrefresh_VERIFICATION_DONE
+ atomic_data
accum_swmr_big.h5
ohdr_swmr.h5
+ test_swmr*.h5
cache_logging.h5
cache_logging.out
+ vds_swmr.h5
+ vds_swmr_src_*.h5
)
# Remove any output file left over from previous test run
@@ -583,6 +596,7 @@ set (H5TEST_TESTS
objcopy
links
unlink
+ twriteorder
big
mtime
fillval
@@ -957,6 +971,9 @@ set_tests_properties (H5PLUGIN-plugin PROPERTIES
### S W M R T E S T S
##############################################################################
# testflushrefresh.sh: flushrefresh
+# test_usecases.sh: use_append_chunk, use_append_mchunks, use_disable_mdc_flushes
+# testswmr.sh: swmr*
+# testvdsswmr.sh: vds_swmr*
##############################################################################
##############################################################################
@@ -1254,6 +1271,7 @@ if (HDF5_BUILD_GENERATORS)
gen_cross
gen_deflate
gen_filters
+ gen_idx
gen_new_array
gen_new_fill
gen_new_group
diff --git a/test/Makefile.am b/test/Makefile.am
index 50b2ee2..fee8cb6 100644
--- a/test/Makefile.am
+++ b/test/Makefile.am
@@ -29,10 +29,17 @@ AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_builddir)/src
# testcheck_version.sh: tcheck_version
# testlinks_env.sh: links_env
# testflushrefresh.sh: flushrefresh
+# test_usecases.sh: use_append_chunk, use_append_mchunks, use_disable_mdc_flushes
+# testswmr.sh: swmr*
+# testvdsswmr.sh: vds_swmr*
TEST_SCRIPT = testerror.sh testlibinfo.sh testcheck_version.sh testlinks_env.sh \
- testflushrefresh.sh
+ testswmr.sh testvdsswmr.sh testflushrefresh.sh test_usecases.sh
SCRIPT_DEPEND = error_test$(EXEEXT) err_compat$(EXEEXT) links_env$(EXEEXT) \
- testflushrefresh.sh
+ flushrefresh$(EXEEXT) use_append_chunk$(EXEEXT) use_append_mchunks$(EXEEXT) use_disable_mdc_flushes$(EXEEXT) \
+ swmr_generator$(EXEEXT) swmr_reader$(EXEEXT) swmr_writer$(EXEEXT) \
+ swmr_remove_reader$(EXEEXT) swmr_remove_writer$(EXEEXT) swmr_addrem_writer$(EXEEXT) \
+ swmr_sparse_reader$(EXEEXT) swmr_sparse_writer$(EXEEXT) swmr_start_write$(EXEEXT) \
+ vds_swmr_gen$(EXEEXT) vds_swmr_reader$(EXEEXT) vds_swmr_writer$(EXEEXT)
if HAVE_SHARED_CONDITIONAL
TEST_SCRIPT += test_plugin.sh
SCRIPT_DEPEND += plugin$(EXEEXT)
@@ -48,7 +55,7 @@ TEST_PROG= testhdf5 cache cache_api cache_tagging lheap ohdr stab gheap \
evict_on_close farray earray btree2 fheap \
pool accum hyperslab istore bittests dt_arith \
dtypes dsets cmpd_dset filter_fail extend external efc objcopy links unlink \
- big mtime fillval mount flush1 flush2 app_ref enum \
+ twriteorder big mtime fillval mount flush1 flush2 app_ref enum \
set_extent ttsafe enc_dec_plist enc_dec_plist_cross_platform\
getname vfd ntypes dangle dtransform reserved cross_read \
freespace mf vds file_image unregister cache_logging cork swmr
@@ -57,15 +64,21 @@ TEST_PROG= testhdf5 cache cache_api cache_tagging lheap ohdr stab gheap \
# error_test and err_compat are built at the same time as the other tests, but executed by testerror.sh.
# tcheck_version is used by testcheck_version.sh.
# accum_swmr_reader is used by accum.c.
+# atomic_writer and atomic_reader are standalone programs.
# links_env is used by testlinks_env.sh
# flushrefresh is used by testflushrefresh.sh.
+# use_append_chunk, use_append_mchunks and use_disable_mdc_flushes are used by test_usecases.sh
+# swmr_* files (besides swmr.c) are used by testswmr.sh.
+# vds_swmr_* files are used by testvdsswmr.sh
# 'make check' doesn't run them directly, so they are not included in TEST_PROG.
# Also build testmeta, which is used for timings test. It builds quickly,
# and this lets automake keep all its test programs in one place.
check_PROGRAMS=$(TEST_PROG) error_test err_compat tcheck_version \
- testmeta accum_swmr_reader \
- links_env flushrefresh \
- swmr_check_compat_vfd
+ testmeta accum_swmr_reader atomic_writer atomic_reader \
+ links_env flushrefresh use_append_chunk use_append_mchunks use_disable_mdc_flushes \
+ swmr_generator swmr_start_write swmr_reader swmr_writer swmr_remove_reader \
+ swmr_remove_writer swmr_addrem_writer swmr_sparse_reader swmr_sparse_writer \
+ swmr_check_compat_vfd vds_swmr_gen vds_swmr_reader vds_swmr_writer
if HAVE_SHARED_CONDITIONAL
check_PROGRAMS+= plugin
endif
@@ -77,7 +90,7 @@ endif
# --enable-build-all at configure time.
# The gen_old_* files can only be compiled with older versions of the library
# so do not appear in this list.
-BUILD_ALL_PROGS=gen_bad_ohdr gen_bogus gen_cross gen_deflate gen_filters gen_new_array \
+BUILD_ALL_PROGS=gen_bad_ohdr gen_bogus gen_cross gen_deflate gen_filters gen_idx gen_new_array \
gen_new_fill gen_new_group gen_new_mtime gen_new_super gen_noencoder \
gen_nullspace gen_udlinks space_overflow gen_filespace gen_specmetaread \
gen_sizes_lheap gen_file_image gen_plist
@@ -106,7 +119,7 @@ else
noinst_LTLIBRARIES=libh5test.la
endif
-libh5test_la_SOURCES=h5test.c testframe.c cache_common.c
+libh5test_la_SOURCES=h5test.c testframe.c cache_common.c swmr_common.c
# Use libh5test.la to compile all of the tests
LDADD=libh5test.la $(LIBHDF5)
@@ -153,7 +166,7 @@ CHECK_CLEANFILES+=accum.h5 cmpd_dset.h5 compact_dataset.h5 dataset.h5 dset_offse
stdio.h5 sec2.h5 dtypes[0-9].h5 dtypes1[0].h5 dt_arith[1-2].h5 tattr.h5 \
tselect.h5 mtime.h5 unlink.h5 unicode.h5 coord.h5 \
fillval_[0-9].h5 fillval.raw mount_[0-9].h5 testmeta.h5 ttime.h5 \
- trefer[1-3].h5 tvltypes.h5 tvlstr.h5 tvlstr2.h5 \
+ trefer[1-3].h5 tvltypes.h5 tvlstr.h5 tvlstr2.h5 twriteorder.dat \
flush.h5 flush-swmr.h5 noflush.h5 noflush-swmr.h5 flush_extend.h5 \
flush_extend-swmr.h5 noflush_extend.h5 noflush_extend-swmr.h5 \
enum1.h5 titerate.h5 ttsafe.h5 tarray1.h5 tgenprop.h5 \
@@ -170,19 +183,27 @@ CHECK_CLEANFILES+=accum.h5 cmpd_dset.h5 compact_dataset.h5 dataset.h5 dset_offse
split_get_file_image_test-m.h5 split_get_file_image_test-r.h5 \
file_image_core_test.h5.copy unregister_filter_1.h5 unregister_filter_2.h5 \
vds_virt.h5 vds_dapl.h5 vds_src_[0-1].h5 \
+ swmr_data.h5 use_append_chunk.h5 use_append_mchunks.h5 use_disable_mdc_flushes.h5 \
flushrefresh.h5 flushrefresh_VERIFICATION_START \
flushrefresh_VERIFICATION_CHECKPOINT1 flushrefresh_VERIFICATION_CHECKPOINT2 \
- flushrefresh_VERIFICATION_DONE accum_swmr_big.h5 ohdr_swmr.h5 \
- cache_logging.h5 cache_logging.out \
- swmr[0-2].h5 tbogus.h5.copy
+ flushrefresh_VERIFICATION_DONE atomic_data accum_swmr_big.h5 ohdr_swmr.h5 \
+ test_swmr*.h5 cache_logging.h5 cache_logging.out vds_swmr.h5 vds_swmr_src_*.h5 \
+ swmr[0-2].h5 swmr_writer.out swmr_writer.log.* swmr_reader.out.* swmr_reader.log.* \
+ tbogus.h5.copy
+
# Sources for testhdf5 executable
testhdf5_SOURCES=testhdf5.c tarray.c tattr.c tchecksum.c tconfig.c tfile.c \
tgenprop.c th5o.c th5s.c tcoords.c theap.c tid.c titerate.c tmeta.c tmisc.c \
trefer.c trefstr.c tselect.c tskiplist.c tsohm.c ttime.c ttst.c tunicode.c \
tvlstr.c tvltypes.c
+# Sources for Use Cases
+use_append_chunk_SOURCES=use_append_chunk.c use_common.c
+use_append_mchunks_SOURCES=use_append_mchunks.c use_common.c
+use_disable_mdc_flushes_SOURCES=use_disable_mdc_flushes.c
+
# Temporary files.
DISTCLEANFILES=testerror.sh testlibinfo.sh testcheck_version.sh testlinks_env.sh test_plugin.sh \
- testflushrefresh.sh
+ testswmr.sh testvdsswmr.sh test_usecases.sh testflushrefresh.sh
include $(top_srcdir)/config/conclude.am
diff --git a/test/POSIX_Order_Write_Test_Report.docx b/test/POSIX_Order_Write_Test_Report.docx
new file mode 100644
index 0000000..cf6d1dc
--- /dev/null
+++ b/test/POSIX_Order_Write_Test_Report.docx
Binary files differ
diff --git a/test/POSIX_Order_Write_Test_Report.pdf b/test/POSIX_Order_Write_Test_Report.pdf
new file mode 100644
index 0000000..0c678c4
--- /dev/null
+++ b/test/POSIX_Order_Write_Test_Report.pdf
Binary files differ
diff --git a/test/SWMR_POSIX_Order_UG.txt b/test/SWMR_POSIX_Order_UG.txt
new file mode 100644
index 0000000..2771af1
--- /dev/null
+++ b/test/SWMR_POSIX_Order_UG.txt
@@ -0,0 +1,94 @@
+POSIX Write Order Test Instructions
+===================================
+
+Purpose
+-------
+This document describes the requirements, implementation design, and
+instructions for building and running the POSIX Write Order test. The name
+of the test is twriteorder and it resides in the test/ directory.
+
+Requirements
+------------
+The test verifies that the write order is strictly consistent, which the
+SWMR feature requires.
+"Strict consistency in computer science is the most stringent consistency
+model. It says that a read operation has to return the result of the
+latest write operation which occurred on that data item."--
+(http://en.wikipedia.org/wiki/Linearizability#Definition_of_linearizability).
+This is also an alternative form of the POSIX write requirement: after a
+write operation has returned success, all reads issued afterward must
+see the data that the write has written.
+
+Implementation Design
+---------------------
+The test simulates what SWMR does by writing chained blocks and checking
+whether they can be read back correctly.
+There is a writer process and a reader process.
+The file is divided into 2KB partitions. The writer writes one chained
+block, each 1KB in size, in each partition after the first partition.
+Each chained block has this structure:
+Bytes 0-3: offset address of its child block. The last child uses 0 as NULL.
+Bytes 4-1023: some artificial data.
+The child block address of Block 1 is NULL (0).
+The child block address of Block 2 is the offset address of Block 1.
+The child block address of Block n is the offset address of Block n-1.
+After all n blocks are written, the offset address of Block n is written
+to the offset 0 of the first partition.
+Therefore, by the time the offset address of Block n is written to this
+position, all n chain-linked blocks have been written.
+
+The reader processes will try to read the address value at
+offset 0. The value is initially NULL (0). When it changes to non-zero,
+it signifies that the writer process has written all the chain-linked blocks
+and they are ready for the reader processes to access.
+
+If the system in which the writer and reader processes run maintains the
+strict write order, the readers will always get all chain-linked blocks
+correctly. If the order of writes is not maintained, some reader processes
+may find unexpected block data.
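+
+As a rough illustration of this design (a sketch only, not the actual
+twriteorder.c code; the constants and the function name below are examples),
+the writer side can be written with plain POSIX I/O like this:
+
+    #include <fcntl.h>
+    #include <string.h>
+    #include <unistd.h>
+
+    #define BLOCKSIZE 1024   /* size of each chained block    */
+    #define PARTSIZE  2048   /* size of each file partition   */
+    #define NBLOCKS    512   /* number of chain-linked blocks */
+
+    static int write_chain(int fd)
+    {
+        char     block[BLOCKSIZE];
+        unsigned prev = 0;                       /* child block address; 0 means NULL */
+        unsigned n;
+
+        for(n = 1; n <= NBLOCKS; n++) {
+            unsigned addr = n * PARTSIZE;        /* block n lives in partition n      */
+
+            memcpy(block, &prev, sizeof(prev));  /* bytes 0-3: child block address    */
+            memset(block + sizeof(prev), (int)n, BLOCKSIZE - sizeof(prev));
+            if(lseek(fd, (off_t)addr, SEEK_SET) < 0)
+                return -1;
+            if(write(fd, block, BLOCKSIZE) != BLOCKSIZE)
+                return -1;
+            prev = addr;
+        }
+
+        /* Only after all blocks are written is the head of the chain published
+         * at offset 0.  If the system preserves the POSIX write order, a reader
+         * that sees a non-zero value here can follow the whole chain. */
+        if(lseek(fd, (off_t)0, SEEK_SET) < 0)
+            return -1;
+        if(write(fd, &prev, sizeof(prev)) != (ssize_t)sizeof(prev))
+            return -1;
+        return 0;
+    }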
+
+Building the Tests
+------------------
+The name of the test is twriteorder in the test directory. It is added
+to the test suite, is built during the "make" process, and is run by
+the test_usecases.sh test. Users may inspect test/test_usecases.sh.in
+to see examples of how it is tested.
+
+Running the Tests
+-----------------
+The twriteorder test accepts the following options:
+$ ./twriteorder -h
+usage: twriteorder [OPTIONS]
+ OPTIONS
+ -h Print a usage message and exit
+ -l w|r launch writer or reader only. [default: launch both]
+ -b N Block size [default: 1024]
+ -p N Partition size [default: 2048]
+ -n N Number of linked blocks [default: 512]
+
+More Examples
+-------------
+
+# run test with default parameters and launch both writer and reader
+# processes.
+$ twriteorder
+
+# run test with blocksize of 1000 bytes (default is 1024 bytes).
+$ twriteorder -b 1000
+
+# run test with partition size of 3000 bytes (default is 2048 bytes).
+$ twriteorder -p 3000
+
+# run test with 2000 linked blocks (default is 512 blocks).
+$ twriteorder -n 2000
+
+# Launch only the writer process.
+$ twriteorder -l w
+
+# Launch only the reader process.
+$ twriteorder -l r
+
+Note that if you want to launch the writer and the reader processes
+manually (for example, on different machines sharing a common file system),
+you need to start the writer process (-l w) first, and then the reader
+process (-l r).
+
diff --git a/test/SWMR_UseCase_UG.txt b/test/SWMR_UseCase_UG.txt
new file mode 100644
index 0000000..e29944a
--- /dev/null
+++ b/test/SWMR_UseCase_UG.txt
@@ -0,0 +1,223 @@
+1. Title:
+ User Guide for SWMR Use Case Programs
+
+2. Purpose:
+ This is a User Guide for the SWMR Use Case programs. It describes the use
+ case programs and explains how to run them.
+
+2.1. Author and Dates:
+ Version 2: By Albert Cheng (acheng@hdfgroup.org), 2013/06/18.
+ Version 1: By Albert Cheng (acheng@hdfgroup.org), 2013/06/01.
+
+
+%%%%Use Case 1.7%%%%
+
+3. Use Case [1.7]:
+ Appending a single chunk
+
+3.1. Program name:
+ use_append_chunk
+
+3.2. Description:
+ Appending a single chunk of raw data to a dataset along an unlimited
+ dimension within a pre-created file and reading the new data back.
+
+ It first creates one 3D dataset using chunked storage; each chunk
+ is a (1, chunksize, chunksize) square. The dataset is (unlimited,
+ chunksize, chunksize). The data type is a 2-byte integer. The dataset
+ starts out "empty", i.e., the first dimension is 0.
+
+ The writer then appends planes, each of (1,chunksize,chunksize),
+ to the dataset. It fills each plane with the plane number and then writes
+ it as the nth plane. It increases the plane number and repeats until
+ the end of the dataset, when it reaches chunksize planes long. The end
+ product is a chunksize^3 cube.
+
+ The reader is a separate process, running in parallel with
+ the writer. It reads planes from the dataset. It expects the
+ dataset to be changing (growing). It checks the unlimited dimension
+ (dimension[0]). When it increases, the reader reads in the new planes, one
+ by one, and verifies the data correctness. (The nth plane should contain
+ all "n".) When the unlimited dimension grows to chunksize (the dataset
+ becomes a cube), that is the expected end of data, and the reader exits.
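+
+ As an illustration only (a sketch, not the actual use_append_chunk.c or
+ use_common.c code; "append_data.h5", "dataset", CHUNKSIZE and buf are
+ placeholders, and error checking is omitted), the heart of the writer
+ loop looks roughly like this:
+
+     hsize_t dims[3]  = {0, CHUNKSIZE, CHUNKSIZE};   /* current dataset extent */
+     hsize_t plane[3] = {1, CHUNKSIZE, CHUNKSIZE};   /* one appended plane     */
+     hsize_t start[3] = {0, 0, 0};
+     hid_t   fid, did, msid, fsid;
+     hsize_t n;
+
+     fid  = H5Fopen("append_data.h5", H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, H5P_DEFAULT);
+     did  = H5Dopen2(fid, "dataset", H5P_DEFAULT);
+     msid = H5Screate_simple(3, plane, NULL);
+
+     for(n = 0; n < CHUNKSIZE; n++) {
+         /* ... fill buf (one plane of shorts) with the plane number n ... */
+         dims[0] = n + 1;                       /* grow the unlimited dimension */
+         H5Dset_extent(did, dims);
+         fsid = H5Dget_space(did);
+         start[0] = n;                          /* select the nth plane */
+         H5Sselect_hyperslab(fsid, H5S_SELECT_SET, start, NULL, plane, NULL);
+         H5Dwrite(did, H5T_NATIVE_SHORT, msid, fsid, H5P_DEFAULT, buf);
+         H5Sclose(fsid);
+         H5Dflush(did);                         /* make the new plane visible to the reader */
+     }
+
+     H5Sclose(msid);  H5Dclose(did);  H5Fclose(fid);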
+
+3.3. How to run the program:
+ Simplest way is
+ $ use_append_chunk
+
+ It creates a skeleton dataset (0,256,256) of shorts. It then forks off
+ a process, which becomes the reader process to read planes from the
+ dataset, while the original process continues as the writer process
+ to append planes onto the dataset.
+
+ Other possible options:
+
+ 1. -z option: different chunksize. Default is 256.
+ $ use_append_chunk -z 1024
+
+ It uses (1,1024,1024) chunks to produce a 1024^3 cube, about 2GB big.
+
+
+ 2. -f filename: different dataset file name
+ $ use_append_chunk -f /gpfs/tmp/append_data.h5
+
+ The data file is /gpfs/tmp/append_data.h5. This allows two independent
+ processes in separated compute nodes to access the datafile on the
+ shared /gpfs file system.
+
+
+ 3. -l option: launch only the reader or writer process.
+ $ use_append_chunk -f /gpfs/tmp/append_data.h5 -l w # in node X
+ $ use_append_chunk -f /gpfs/tmp/append_data.h5 -l r # in node Y
+
+ In node X, launch the writer process, which creates the data file
+ and appends to it.
+ In node Y, launch the read process to read the data file.
+
+ Note that you need to time the read process to start AFTER the write
+ process has created the skeleton data file. Otherwise, the reader
+ will encounter errors such as data file not found.
+
+ 4. -n option: number of planes to write/read. Default is the same as the
+ chunk size as specified by option -z.
+ $ use_append_chunk -n 1000 # 1000 planes are written and read.
+
+ 5. -s option: use SWMR file access mode or not. Default is yes.
+ $ use_append_chunk -s 0
+
+ It opens the HDF5 data file without the SWMR access mode (0 means
+ off). This will likely result in errors. This option is provided so
+ users can see the effect of the needed SWMR access mode for concurrent
+ access.
+
+3.4. Test Shell Script:
+ The Use Case program is installed in the test/ directory and is
+ compiled as part of the make process. A test script (test_usecases.sh)
+ is installed in the same directory to test the use case programs. The
+ test script is rather basic and is more for demonstrating how to
+ use the program.
+
+
+%%%%Use Case 1.8%%%%
+
+4. Use Case [1.8]:
+ Appending a hyperslab of multiple chunks.
+
+4.1. Program name:
+ use_append_mchunks
+
+4.2. Description:
+ Appending a hyperslab that spans several chunks of a dataset with
+ unlimited dimensions within a pre-created file and reading the new
+ data back.
+
+ It first creates one 3D dataset using chunked storage; each chunk is a (1,
+ chunksize, chunksize) square. The dataset is (unlimited, 2*chunksize,
+ 2*chunksize). The data type is a 2-byte integer. Therefore, each plane
+ consists of 4 chunks. It starts out "empty", i.e., the first dimension is 0.
+
+ The writer then appends planes, each of (1,2*chunksize,2*chunksize),
+ to the dataset. It fills each plane with the plane number and then writes
+ it as the nth plane. It increases the plane number and repeats until
+ the end of the dataset, when it reaches 2*chunksize planes long. The end
+ product is a (2*chunksize)^3 cube.
+
+ The reader is a separate process, running in parallel with
+ the writer. It reads planes from the dataset. It expects the
+ dataset to be changing (growing). It checks the unlimited dimension
+ (dimension[0]). When it increases, the reader reads in the new planes, one
+ by one, and verifies the data correctness. (The nth plane should contain
+ all "n".) When the unlimited dimension grows to 2*chunksize (the dataset
+ becomes a cube), that is the expected end of data, and the reader exits.
+
+4.3. How to run the program:
+ Simplest way is
+ $ use_append_mchunks
+
+ It creates a skeleton dataset (0,512,512) of shorts. It then forks off
+ a process, which becomes the reader process to read planes from the
+ dataset, while the original process continues as the writer process
+ to append planes onto the dataset.
+
+ Other possible options:
+
+ 1. -z option: different chunksize. Default is 256.
+ $ use_append_mchunks -z 512
+
+ It uses (1,512,512) chunks to produce a 1024^3 cube, about 2GB big.
+
+
+ 2. -f filename: different dataset file name
+ $ use_append_mchunks -f /gpfs/tmp/append_data.h5
+
+ The data file is /gpfs/tmp/append_data.h5. This allows two independent
+ processes in separated compute nodes to access the datafile on the
+ shared /gpfs file system.
+
+
+ 3. -l option: launch only the reader or writer process.
+ $ use_append_mchunks -f /gpfs/tmp/append_data.h5 -l w # in node X
+ $ use_append_mchunks -f /gpfs/tmp/append_data.h5 -l r # in node Y
+
+ In node X, launch the writer process, which creates the data file
+ and appends to it.
+ In node Y, launch the read process to read the data file.
+
+ Note that you need to time the read process to start AFTER the write
+ process has created the skeleton data file. Otherwise, the reader
+ will encounter errors such as data file not found.
+
+ 4. -n option: number of planes to write/read. Default is the same as the
+ chunk size as specified by option -z.
+ $ use_append_mchunks -n 1000 # 1000 planes are written and read.
+
+ 5. -s option: use SWMR file access mode or not. Default is yes.
+ $ use_append_mchunks -s 0
+
+ It opens the HDF5 data file without the SWMR access mode (0 means
+ off). This will likely result in errors. This option is provided so
+ users can see the effect of the needed SWMR access mode for concurrent
+ access.
+
+4.4. Test Shell Script:
+ The Use Case program is installed in the test/ directory and is
+ compiled as part of the make process. A test script (test_usecases.sh)
+ is installed in the same directory to test the use case programs. The
+ test script is rather basic and is more for demonstrating how to
+ use the program.
+
+
+%%%%Use Case 1.9%%%%
+
+5. Use Case [1.9]:
+ Appending n-1 dimensional planes
+
+5.1. Program names:
+ use_append_chunk and use_append_mchunks
+
+5.2. Description:
+ Appending n-1 dimensional planes or regions to a chunked dataset where
+ the data does not fill the chunk.
+
+ This means the chunks have multiple planes and when a plane is written,
+ only one of the planes in each chunk is written. This use case is
+ achieved by extending the previous use cases 1.7 and 1.8 by defining the
+ chunks to have more than 1 plane. The -y option is implemented for both
+ use_append_chunk and use_append_mchunks.
+
+5.3. How to run the program:
+ Simplest way is
+ $ use_append_mchunks -y 5
+
+ It creates a skeleton dataset (0,512,512), with storage chunks (5,512,512)
+ of shorts. It then proceeds like use case 1.8 by forking off a reader
+ process. The original process continues as the writer process that
+ writes 1 plane at a time, updating parts of the chunks involved. The
+ reader reads 1 plane at a time, retrieving data from partial chunks.
+
+ The other possible options work just as in the previous two use cases.
+
+5.4. Test Shell Script:
+ Commands with the -y option are added to demonstrate how the two use case
+ programs can be used for this use case.
+
diff --git a/test/atomic_reader.c b/test/atomic_reader.c
new file mode 100644
index 0000000..94d9c74
--- /dev/null
+++ b/test/atomic_reader.c
@@ -0,0 +1,363 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+/*-------------------------------------------------------------------------
+ *
+ * Created: atomic_reader.c
+ *
+ * Purpose: This is the "reader" part of the standalone test to check
+ * atomic read-write operation on a system.
+ * a) atomic_reader.c--the reader (this file)
+ * b) atomic_writer.c--the writer
+ * c) atomic_data--the name of the data file used by writer and reader
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#if !defined(WIN32) && !defined(__MINGW32__)
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+/****************/
+/* Local Macros */
+/****************/
+
+#define FILENAME "atomic_data"
+#define READ_TRIES 20
+#define OPEN_TRIES 50
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static void usage(void);
+int verify(int fd, unsigned int k);
+void print_info(int *info, unsigned int lastr, unsigned iteration);
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: usage
+ *
+ * Purpose: To print the command line options
+ *
+ * Parameters: None
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("Usage: atomic_reader -n <number of integers to read> -i <number of iterations for reader>\n");
+ printf(" Note**The number of integers for option n has to be positive\n");
+ printf(" Note**The number of integers for option i has to be positive\n");
+ printf("\n");
+} /* usage() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: verify
+ *
+ * Purpose: To verify that the data read is the pattern expected.
+ * Each integer read should be the same as the index.
+ * When a difference is encountered, the remaining integers
+ * read should be the same as the previous index.
+ * For example, the pattern expected should be either:
+ * a) 01234567....n-1
+ * or
+ * b) if at index 4, a difference is encountered,
+ * the remaining integers should be all "3"s as:
+ * 012333333333333
+ *
+ * Parameters:
+ * fd -- the file descriptor
+ * k -- the number of integers to read
+ *
+ * Return:
+ * positive on success
+ * negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+verify(int fd, unsigned int k)
+{
+ unsigned int i; /* local index variable */
+ ssize_t bytes_read; /* the number of bytes read */
+ unsigned int *buf = NULL; /* buffer to hold data read */
+
+ /* Allocate buffer for data read */
+ if((buf = (unsigned int *)malloc(k * sizeof(unsigned int))) == NULL) {
+ printf("READER: error from malloc\n");
+ goto error;
+ } /* end if */
+
+ /* Position the file at the beginning */
+ if(lseek(fd, (off_t)0, SEEK_SET) < 0) {
+ printf("READER: error from lseek\n");
+ goto error;
+ } /* end if */
+
+ /* Read the whole file */
+ if((bytes_read = read(fd, buf, (k * sizeof(unsigned int)))) < 0) {
+ printf("READER: error from read\n");
+ goto error;
+ } /* end if */
+
+ /* Verify the bytes read are correct */
+ if(bytes_read != (ssize_t)(k*sizeof(unsigned int))) {
+ printf("READER: error from bytes read=%lu\n", (unsigned long)bytes_read);
+ goto error;
+ } /* end if */
+
+ /* Verify data read */
+ for(i=0; i < k; i++) {
+ if(buf[i] != i)
+ break;
+ } /* end for */
+
+ if(i < k) {
+ /* Compare the beginning and ending sentinel values */
+ if(buf[k-1] != (i-1)) {
+ printf("FAIL IN READER: ...beginning sentinel value=%u, i=%u\n", (i-1), i);
+ printf("FAIL IN READER: buf[%u]=%u\n", i-1, buf[i-1]);
+ printf("FAIL IN READER: buf[%u]=%u\n", i, buf[i]);
+ printf("FAIL IN READER: buf[%u]=%u\n", i+1, buf[i+1]);
+ printf("FAIL IN READER: ...ending sentinel value=%u\n", buf[k-1]);
+ goto error;
+ } /* end if */
+ } /* end if */
+
+ /* Free the buffer */
+ if(buf)
+ free(buf);
+ return 0;
+
+error:
+ /* Free the buffer */
+ if(buf)
+ free(buf);
+ return -1;
+} /* end verify() */
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: print_info
+ *
+ * Purpose: To print the statistics gathered for re-reads
+ *
+ * Parameters:
+ * info -- the array storing the statistics for re-reads
+ * lastr -- the last read completed
+ * iteration -- the current iteration
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+print_info(int *info, unsigned int lastr, unsigned iteration)
+{
+ unsigned j; /* local index variable */
+
+ printf("--------statistics for %u reads (iteration %u)--------\n", lastr, iteration);
+
+ for(j = 0; j <= READ_TRIES; j++)
+ printf("# of %u re-tries = %u\n", j, info[j]);
+
+ printf("--------end statistics for %u reads (iteration %u)--------\n", lastr, iteration);
+} /* print_info() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: To verify that the data read is the pattern expected.
+ * (1) Make sure the file opens successfully and the # of bytes read is as expected
+ * (2) Iterate the reader with i iterations
+ * (3) Read and verify n integers for each iteration
+ * (4) On verification error, re-read the data at most READ_TRIES
+ * times to see if correct data can be obtained
+ * (5) Print out statistics for the number of re-retries for each iteration
+ *
+ * Note:
+ * (a) The # of integers (via -n option) used by the writer and reader should be the same.
+ * (b) The data file used by the writer and reader should be the same.
+ *
+ * Future enhancement:
+ * 1) Provide default values for n and i and allow user to run with either 0 or 1 option
+ * 2) Use HDF library HD<system calls> instead of the system calls
+ * 3) Handle large sized buffer (gigabytes) if needed
+ *
+ * Return: Success: EXIT_SUCCESS
+ * Failure: EXIT_FAILURE
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(int argc, char *argv[])
+{
+ int fd = -1; /* file descriptor */
+ unsigned int j=0, i=0, m=0; /* local index variables */
+ int temp; /* temporary variable */
+ unsigned int iterations = 0; /* the input for "-i" */
+ unsigned num = 0; /* the input for "-n" */
+ int opt = 0; /* option char */
+ int info[READ_TRIES+1]; /* re-tries statistics */
+
+ /* Ensure the expected # of arguments */
+ if(argc != 5) {
+ usage();
+ exit(EXIT_FAILURE);
+ } /* end if */
+
+ /* Parse command line options */
+ while((opt = getopt(argc, argv, "n:i:")) != -1) {
+ switch(opt) {
+ case 'n':
+ if((temp = atoi(optarg)) < 0) {
+ usage();
+ exit(EXIT_FAILURE);
+ } /* end if */
+ num = (unsigned int)temp;
+ break;
+ case 'i':
+ if((temp = atoi(optarg)) < 0) {
+ usage();
+ exit(EXIT_FAILURE);
+ } /* end if */
+ iterations = (unsigned int)temp;
+ break;
+ default:
+ printf("Invalid option encountered\n");
+ break;
+ } /* end switch */
+ } /* end while */
+
+ printf("READER: number of integers to read = %u; # of iterations = %u\n", num, iterations);
+
+ printf("\n");
+ for(i = 1; i <= iterations; i++) {
+ unsigned opens = OPEN_TRIES;
+
+ printf("READER: *****start iteration %u*****\n", i);
+
+ /* Ensure open and file size are done properly */
+ while(opens--) {
+ struct stat sinfo;
+
+ memset(&sinfo, 0, sizeof(sinfo));
+
+ if((fd = open(FILENAME, O_RDONLY, 0644)) < 0) {
+ printf("READER: error from open--retry open again\n");
+ } else {
+ printf("READER: open succeed\n");
+
+ if((fstat(fd, &sinfo) == 0) &&
+ (sinfo.st_size == (off_t)(num * sizeof(unsigned int)))) {
+ printf("READER: file size is correct--%u\n", (unsigned int)sinfo.st_size);
+ break;
+ } /* end if */
+
+ printf("READER: error from fstat or file size of %u is incorrect--retry open again\n", (unsigned int)sinfo.st_size);
+ if(close(fd) < 0) {
+ printf("READER: error from close\n");
+ return EXIT_FAILURE;
+ } /* end if */
+ fd = -1;
+ } /* end else */
+
+ } /* end while */
+
+ if(fd < 0) {
+ printf("READER: *****open failure/incorrect file size for all %u tries, continue next iteration*****\n\n", OPEN_TRIES);
+ continue;
+ } /* end if */
+
+ memset(info, 0, sizeof(info));
+
+ /* Read and verify data */
+ for(j = 1; j <= num; j++) {
+
+ printf("READER: doing read %u\n", j);
+ if(verify(fd, num) < 0) {
+ printf("READER: error from read %u\n", j);
+
+ /* Perform re-read to see if correct data is obtained */
+ for(m = 1; m <= READ_TRIES; m++) {
+ printf("READER: ===============going to do re-read try %u\n", m);
+ if(verify(fd, num) < 0)
+ printf("READER: ===============error from re-read try %u\n", m);
+ else {
+ ++info[m];
+ printf("READER: ===============SUCCESS from re-read try %u\n", m);
+ break;
+ } /* end else */
+ } /* end for */
+
+ if(m > READ_TRIES) {
+ printf("READER: ===============error from all re-read tries: %u\n", READ_TRIES);
+ printf("READER:*****ERROR--stop on read %u\n", j);
+ break;
+ } /* end if */
+ } else {
+ ++info[0];
+ printf("READER: success from read %u\n", j);
+ } /* end else */
+
+ } /* end for */
+
+ /* Print the statistics for re-reads */
+ print_info(info, j-1, i);
+
+ /* Close the file */
+ if(close(fd) < 0) {
+ printf("READER: error from close\n");
+ return EXIT_FAILURE;
+ } /* end if */
+
+ printf("READER: *****end iteration %u*****\n\n", i);
+
+ } /* end for */
+
+ return EXIT_SUCCESS;
+}
+
+#else /* WIN32 / MINGW32 */
+
+int
+main(void)
+{
+ printf("Non-POSIX platform. Exiting.\n");
+ return EXIT_FAILURE;
+} /* end main() */
+
+#endif /* WIN32 / MINGW32 */
+
diff --git a/test/atomic_writer.c b/test/atomic_writer.c
new file mode 100644
index 0000000..ec1e8c9
--- /dev/null
+++ b/test/atomic_writer.c
@@ -0,0 +1,245 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: atomic_writer.c
+ *
+ * Purpose: This is the "writer" part of the standalone test to check
+ * atomic read-write operation on a system.
+ * a) atomic_writer.c--the writer (this file)
+ * b) atomic_reader.c--the reader
+ * c) atomic_data--the name of the data file used by writer and reader
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#if !defined(WIN32) && !defined(__MINGW32__)
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+
+/****************/
+/* Local Macros */
+/****************/
+
+#define FILENAME "atomic_data"
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static void usage(void);
+
+
+/*-------------------------------------------------------------------------
+ * Function: usage
+ *
+ * Purpose: To print information about the command line options
+ *
+ * Parameters: None
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("Usage: atomic_writer -n <number of integers to write> -i <number of iterations for writer>\n");
+ printf(" Note**The number of integers for option n has to be positive\n");
+ printf(" Note**The number of integers for option i has to be positive\n");
+ printf("\n");
+} /* usage() */
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: To write a series of integers to a file for the reader to verify the data.
+ * A write is atomic if the whole amount written in one operation is not interleaved
+ * with data from any other process.
+ * (1) Iterate with i iterations
+ * (2) Write a series of integers (0 to n-1) to the file with this pattern:
+ * offset 0: 0000000000000000000000000000000
+ * offset 1: 111111111111111111111111111111
+ * offset 2: 22222222222222222222222222222
+ * offset 3: 3333333333333333333333333333
+ * ...
+ * ...
+ * offset n-1: (n-1)
+ *
+ * At the end of the writes, the data in the file will be:
+ * 01234567........(n-1)
+ *
+ * Note:
+ * (a) The # of integers (via -n option) used by the writer and reader should be the same.
+ * (b) The data file used by the writer and reader should be the same.
+ *
+ * Future enhancement:
+ * 1) Provide default values for n and i and allow user to run with either 0 or 1 option
+ * 2) Use HDF library HD<system calls> instead of the system calls
+ * 3) Handle large sized buffer (gigabytes) if needed
+ *
+ * Return: Success: EXIT_SUCCESS
+ * Failure: EXIT_FAILURE
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(int argc, char *argv[])
+{
+ int fd = -1; /* file descriptor */
+ ssize_t bytes_wrote; /* the number of bytes written */
+ unsigned int *buf = NULL; /* buffer to hold written data */
+ unsigned int n, u, i; /* local index variable */
+ int temp; /* temporary variable */
+ unsigned int iterations = 0; /* the input for "-i" */
+ unsigned int num = 0; /* the input for "-n" */
+ int opt = 0; /* option char */
+
+ /* Ensure the # of arguments is as expected */
+ if(argc != 5) {
+ usage();
+ exit(EXIT_FAILURE);
+ } /* end if */
+
+ /* Parse command line options */
+ while((opt = getopt(argc, argv, "n:i:")) != -1) {
+ switch(opt) {
+ case 'n':
+ if((temp = atoi(optarg)) < 0) {
+ usage();
+ exit(EXIT_FAILURE);
+ } /* end if */
+ num = (unsigned int)temp;
+ break;
+ case 'i':
+ if((temp = atoi(optarg)) < 0) {
+ usage();
+ exit(EXIT_FAILURE);
+ } /* end if */
+ iterations = (unsigned int)temp;
+ break;
+ default:
+ printf("Invalid option encountered\n");
+ break;
+ } /* end switch */
+ } /* end while */
+
+ printf("WRITER: # of integers to write = %u; # of iterations = %u\n", num, iterations);
+
+ /* Remove existing data file if needed */
+ if(remove(FILENAME) < 0) {
+ if(errno == ENOENT)
+ printf("WRITER: remove %s--%s\n", FILENAME, strerror(errno));
+ else {
+ printf("WRITER: error from remove: %d--%s\n", errno, strerror(errno));
+ goto error;
+ } /* end else */
+ } else
+ printf("WRITER: %s is removed\n", FILENAME);
+
+ /* Create the data file */
+ if((fd = open(FILENAME, O_RDWR|O_TRUNC|O_CREAT, 0664)) < 0) {
+ printf("WRITER: error from open\n");
+ goto error;
+ } /* end if */
+
+ /* Allocate buffer for holding data to be written */
+ if((buf = (unsigned int *)malloc(num * sizeof(unsigned int))) == NULL) {
+ printf("WRITER: error from malloc\n");
+ if(fd >= 0 && close(fd) < 0)
+ printf("WRITER: error from close\n");
+ goto error;
+ } /* end if */
+
+ printf("\n");
+
+ for(i = 1; i <= iterations; i++) {
+ printf("WRITER: *****start iteration %u*****\n", i);
+
+ /* Write the series of integers to the file */
+ for(n = 0; n < num; n++) {
+
+ /* Set up data to be written */
+ for(u=0; u < num; u++)
+ buf[u] = n;
+
+ /* Position the file to the proper location */
+ if(lseek(fd, (off_t)(n*sizeof(unsigned int)), SEEK_SET) < 0) {
+ printf("WRITER: error from lseek\n");
+ goto error;
+ } /* end if */
+
+ /* Write the data */
+ if((bytes_wrote = write(fd, buf, ((num-n) * sizeof(unsigned int)))) < 0) {
+ printf("WRITER: error from write\n");
+ goto error;
+ } /* end if */
+
+ /* Verify the bytes written is correct */
+ if(bytes_wrote != (ssize_t)((num-n) * sizeof(unsigned int))) {
+ printf("WRITER: error from bytes written\n");
+ goto error;
+ } /* end if */
+ } /* end for */
+
+ printf("WRITER: *****end iteration %u*****\n\n", i);
+
+ } /* end for */
+
+ /* Close the file */
+ if(close(fd) < 0) {
+ printf("WRITER: error from close\n");
+ goto error;
+ } /* end if */
+
+ /* Free the buffer */
+ if(buf)
+ free(buf);
+
+ return EXIT_SUCCESS;
+
+error:
+ return EXIT_FAILURE;
+} /* end main() */
+
+#else /* WIN32 / MINGW32 */
+
+int
+main(void)
+{
+ printf("Non-POSIX platform. Exiting.\n");
+ return EXIT_FAILURE;
+} /* end main() */
+
+#endif /* WIN32 / MINGW32 */
+
diff --git a/test/gen_idx.c b/test/gen_idx.c
new file mode 100644
index 0000000..8c24198
--- /dev/null
+++ b/test/gen_idx.c
@@ -0,0 +1,126 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Purpose: This program is run to generate an HDF5 data file with datasets
+ * that use Fixed Array indexing method.
+ *
+ * To test compatibility, compile and run this program
+ * which will generate a file called "fixed_idx.h5".
+ * Move it to the test directory in the HDF5 v1.6/1.8 source tree.
+ * The test: test_idx_compatible() in dsets.c will read it.
+ */
+#include <assert.h>
+#include "hdf5.h"
+
+const char *FILENAME[1] = {
+ "fixed_idx.h5" /* file with datasets that use Fixed Array indexing method */
+};
+
+#define DSET "dset"
+#define DSET_FILTER "dset_filter"
+
+/*
+ * Function: gen_idx_file
+ *
+ * Purpose: Create a file with datasets that use Fixed Array indexing:
+ * one dataset: fixed dimension, chunked layout, w/o filters
+ * one dataset: fixed dimension, chunked layout, w/ filters
+ *
+ */
+static void gen_idx_file(void)
+{
+ hid_t fapl; /* file access property id */
+ hid_t fid; /* file id */
+ hid_t sid; /* space id */
+ hid_t dcpl; /* dataset creation property id */
+ hid_t did, did2; /* dataset id */
+ hsize_t dims[1] = {10}; /* dataset dimension */
+ hsize_t c_dims[1] = {2}; /* chunk dimension */
+ herr_t status; /* return status */
+ int i; /* local index variable */
+ int buf[10]; /* data buffer */
+
+
+ /* Get a copy of the file access property */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ assert(fapl >= 0);
+
+ /* Set the "use the latest format" bounds for creating objects in the file */
+ status = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+ assert(status >= 0);
+
+ /* Create the file */
+ fid = H5Fcreate(FILENAME[0], H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ assert(fid >= 0);
+
+ /* Create data */
+ for(i = 0; i < 10; i++)
+ buf[i] = i;
+
+ /* Set chunk */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ assert(dcpl >= 0);
+ status = H5Pset_chunk(dcpl, 1, c_dims);
+ assert(status >= 0);
+
+ sid = H5Screate_simple(1, dims, NULL);
+ assert(sid >= 0);
+
+ /* Create a 1D dataset */
+ did = H5Dcreate2(fid, DSET, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ assert(did >= 0);
+
+ /* Write to the dataset */
+ status = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);
+ assert(status >= 0);
+
+#if defined (H5_HAVE_FILTER_DEFLATE)
+ /* set deflate data */
+ status = H5Pset_deflate(dcpl, 9);
+ assert(status >= 0);
+
+ /* Create and write the dataset */
+ did2 = H5Dcreate2(fid, DSET_FILTER, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ assert(did2 >= 0);
+
+ status = H5Dwrite(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);
+ assert(status >= 0);
+
+ /* Close the dataset */
+ status = H5Dclose(did2);
+ assert(status >= 0);
+#endif
+
+ /* closing */
+ status = H5Dclose(did);
+ assert(status >= 0);
+ status = H5Sclose(sid);
+ assert(status >= 0);
+ status = H5Pclose(dcpl);
+ assert(status >= 0);
+ status = H5Pclose(fapl);
+ assert(status >= 0);
+ status = H5Fclose(fid);
+ assert(status >= 0);
+} /* gen_idx_file() */
+
+int main(void)
+{
+ gen_idx_file();
+
+ return 0;
+}
+
diff --git a/test/swmr_addrem_writer.c b/test/swmr_addrem_writer.c
new file mode 100644
index 0000000..d3b5829
--- /dev/null
+++ b/test/swmr_addrem_writer.c
@@ -0,0 +1,443 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_addrem_writer.c
+ *
+ * Purpose: Adds and removes data to a randomly selected subset of the
+ * datasets in the SWMR test file.
+ *
+ * This program is intended to run concurrently with the
+ * swmr_reader program. It is also run AFTER a sequential
+ * (not concurrent!) invoking of swmr_writer so the writer
+ * can dump a bunch of data into the datasets. Otherwise,
+ * there wouldn't be much to shrink :)
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include "h5test.h"
+#include "swmr_common.h"
+
+/****************/
+/* Local Macros */
+/****************/
+
+/* The maximum # of records to add/remove from the dataset in one step */
+#define MAX_SIZE_CHANGE 10
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static hid_t open_skeleton(const char *filename, unsigned verbose);
+static int addrem_records(hid_t fid, unsigned verbose, unsigned long nops,
+ unsigned long flush_count);
+static void usage(void);
+
+
+/*-------------------------------------------------------------------------
+ * Function: open_skeleton
+ *
+ * Purpose: Opens the SWMR HDF5 file and datasets.
+ *
+ * Parameters: const char *filename
+ * The filename of the SWMR HDF5 file to open
+ *
+ * unsigned verbose
+ * Whether or not to emit verbose console messages
+ *
+ * Return: Success: The file ID of the opened SWMR file
+ * The dataset IDs are stored in a global array
+ *
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static hid_t
+open_skeleton(const char *filename, unsigned verbose)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t fapl; /* File access property list */
+ hid_t sid; /* Dataspace ID */
+ hsize_t dim[2]; /* Dataspace dimension */
+ unsigned u, v; /* Local index variable */
+
+ HDassert(filename);
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ /* Set to use the latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+
+#ifdef QAK
+ /* Increase the initial size of the metadata cache */
+ {
+ H5AC_cache_config_t mdc_config;
+
+ mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ H5Pget_mdc_config(fapl, &mdc_config);
+ HDfprintf(stderr, "mdc_config.initial_size = %lu\n", (unsigned long)mdc_config.initial_size);
+ HDfprintf(stderr, "mdc_config.epoch_length = %lu\n", (unsigned long)mdc_config.epoch_length);
+ mdc_config.set_initial_size = 1;
+ mdc_config.initial_size = 16 * 1024 * 1024;
+ /* mdc_config.epoch_length = 5000; */
+ H5Pset_mdc_config(fapl, &mdc_config);
+ }
+#endif /* QAK */
+
+#ifdef QAK
+ H5Pset_fapl_log(fapl, "append.log", H5FD_LOG_ALL, (size_t)(512 * 1024 * 1024));
+#endif /* QAK */
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, fapl)) < 0)
+ return -1;
+
+ /* Close file access property list */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Opening datasets\n");
+
+ /* Open the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+ if((symbol_info[u][v].dsid = H5Dopen2(fid, symbol_info[u][v].name, H5P_DEFAULT)) < 0)
+ return -1;
+ if((sid = H5Dget_space(symbol_info[u][v].dsid)) < 0)
+ return -1;
+ if(2 != H5Sget_simple_extent_ndims(sid))
+ return -1;
+ if(H5Sget_simple_extent_dims(sid, dim, NULL) < 0)
+ return -1;
+ symbol_info[u][v].nrecords = dim[1];
+ } /* end for */
+
+ return fid;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: addrem_records
+ *
+ * Purpose: Adds/removes a specified number of records to/from random datasets
+ * in the SWMR test file.
+ *
+ * Parameters: hid_t fid
+ * The file ID of the SWMR HDF5 file
+ *
+ * unsigned verbose
+ * Whether or not to emit verbose console messages
+ *
+ * unsigned long nops
+ * # of records to read/write in the datasets
+ *
+ * unsigned long flush_count
+ * # of records to write before flushing the file to disk
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+addrem_records(hid_t fid, unsigned verbose, unsigned long nops, unsigned long flush_count)
+{
+ hid_t tid; /* Datatype ID for records */
+ hid_t mem_sid; /* Memory dataspace ID */
+ hsize_t start[2] = {0, 0}, count[2] = {1, 1}; /* Hyperslab selection values */
+ hsize_t dim[2] = {1, 0}; /* Dataspace dimensions */
+ symbol_t buf[MAX_SIZE_CHANGE]; /* Write buffer */
+ unsigned long op_to_flush; /* # of operations before flush */
+ unsigned long u, v; /* Local index variables */
+
+ HDassert(fid > 0);
+
+ /* Reset the buffer */
+ HDmemset(&buf, 0, sizeof(buf));
+
+ /* Create a dataspace for the record to add */
+ if((mem_sid = H5Screate_simple(2, count, NULL)) < 0)
+ return -1;
+
+ /* Create datatype for appending records */
+ if((tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Add and remove records to random datasets, according to frequency
+ * distribution */
+ op_to_flush = flush_count;
+ for(u=0; u<nops; u++) {
+ symbol_info_t *symbol; /* Symbol to write record to */
+ hid_t file_sid; /* Dataset's space ID */
+
+ /* Get a random dataset, according to the symbol distribution */
+ symbol = choose_dataset();
+
+ /* Decide whether to shrink or expand, and by how much */
+ count[1] = (hsize_t)HDrandom() % (MAX_SIZE_CHANGE * 2) + 1;
+
+ if(count[1] > MAX_SIZE_CHANGE) {
+ /* Add records */
+ count[1] -= MAX_SIZE_CHANGE;
+
+ /* Set the buffer's IDs (equal to its position) */
+ for(v=0; v<count[1]; v++)
+ buf[v].rec_id = (uint64_t)symbol->nrecords + (uint64_t)v;
+
+ /* Set the memory space to the correct size */
+ if(H5Sset_extent_simple(mem_sid, 2, count, NULL) < 0)
+ return -1;
+
+ /* Get the coordinates to write */
+ start[1] = symbol->nrecords;
+
+ /* Cork the metadata cache, to prevent the object header from being
+ * flushed before the data has been written */
+ if(H5Odisable_mdc_flushes(symbol->dsid) < 0)
+ return -1;
+
+ /* Extend the dataset's dataspace to hold the new record */
+ symbol->nrecords+= count[1];
+ dim[1] = symbol->nrecords;
+ if(H5Dset_extent(symbol->dsid, dim) < 0)
+ return -1;
+
+ /* Get the dataset's dataspace */
+ if((file_sid = H5Dget_space(symbol->dsid)) < 0)
+ return -1;
+
+ /* Choose the last record in the dataset */
+ if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ return -1;
+
+ /* Write record to the dataset */
+ if(H5Dwrite(symbol->dsid, tid, mem_sid, file_sid, H5P_DEFAULT, &buf) < 0)
+ return -1;
+
+ /* Uncork the metadata cache */
+ if(H5Oenable_mdc_flushes(symbol->dsid) < 0)
+ return -1;
+
+ /* Close the dataset's dataspace */
+ if(H5Sclose(file_sid) < 0)
+ return -1;
+ } /* end if */
+ else {
+ /* Shrink the dataset's dataspace */
+ if(count[1] > symbol->nrecords)
+ symbol->nrecords = 0;
+ else
+ symbol->nrecords -= count[1];
+ dim[1] = symbol->nrecords;
+ if(H5Dset_extent(symbol->dsid, dim) < 0)
+ return -1;
+ } /* end else */
+
+ /* Check for flushing file */
+ if(flush_count > 0) {
+ /* Decrement count of records to write before flushing */
+ op_to_flush--;
+
+ /* Check for counter being reached */
+ if(0 == op_to_flush) {
+ /* Flush contents of file */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ return -1;
+
+ /* Reset flush counter */
+ op_to_flush = flush_count;
+ } /* end if */
+ } /* end if */
+ } /* end for */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Closing datasets\n");
+
+ /* Close the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++)
+ if(H5Dclose(symbol_info[u][v].dsid) < 0)
+ return -1;
+
+ return 0;
+}
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_addrem_writer [-q] [-f <# of operations between flushing\n");
+ printf(" file contents>] [-r <random seed>] <# of operations>\n");
+ printf("\n");
+ printf("<# of operations between flushing file contents> should be 0 (for\n");
+ printf("no flushing) or between 1 and (<# of operations> - 1).\n");
+ printf("\n");
+ printf("<# of operations> must be specified.\n");
+ printf("\n");
+ printf("Defaults to verbose (no '-q' given), flushing every 1000 operations\n");
+ printf("('-f 1000'), and will generate a random seed (no -r given).\n");
+ printf("\n");
+ HDexit(1);
+}
+
+int main(int argc, const char *argv[])
+{
+ hid_t fid; /* File ID for file opened */
+ long nops = 0; /* # of times to grow or shrink the dataset */
+ long flush_count = 1000; /* # of operations between flushes of the file */
+ unsigned verbose = 1; /* Whether to emit some informational messages */
+ unsigned use_seed = 0; /* Set to 1 if a seed was set on the command line */
+ unsigned random_seed = 0; /* Random # seed */
+ unsigned u; /* Local index variable */
+ int temp;
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of operations between flushing file contents */
+ case 'f':
+ flush_count = HDatol(argv[u + 1]);
+ if(flush_count < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = 0;
+ u++;
+ break;
+
+ /* Random # seed */
+ case 'r':
+ use_seed = 1;
+ temp = HDatoi(argv[u + 1]);
+ if(temp < 0)
+ usage();
+ else
+ random_seed = (unsigned)temp;
+ u += 2;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of operations to perform */
+ nops = HDatol(argv[u]);
+ if(nops <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+ if(nops <= 0)
+ usage();
+ if(flush_count >= nops)
+ usage();
+
+ /* Emit informational message */
+ if(verbose) {
+ HDfprintf(stderr, "Parameters:\n");
+ HDfprintf(stderr, "\t# of operations between flushes = %ld\n", flush_count);
+ HDfprintf(stderr, "\t# of operations = %ld\n", nops);
+ } /* end if */
+
+ /* Set the random seed */
+ if(0 == use_seed) {
+ struct timeval t;
+ HDgettimeofday(&t, NULL);
+ random_seed = (unsigned)(t.tv_usec);
+ } /* end if */
+ HDsrandom(random_seed);
+ /* ALWAYS emit the random seed for possible debugging */
+ HDfprintf(stderr, "Using writer random seed: %u\n", random_seed);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Opening skeleton file: %s\n", FILENAME);
+
+ /* Open file skeleton */
+ if((fid = open_skeleton(FILENAME, verbose)) < 0) {
+ HDfprintf(stderr, "Error opening skeleton file!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Send a message to indicate "H5Fopen" is complete--releasing the file lock */
+ h5_send_message(WRITER_MESSAGE, NULL, NULL);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Adding and removing records\n");
+
+ /* Grow and shrink datasets */
+ if(addrem_records(fid, verbose, (unsigned long)nops, (unsigned long)flush_count) < 0) {
+ HDfprintf(stderr, "Error adding and removing records from datasets!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ HDfprintf(stderr, "Error releasing symbols!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Closing objects\n");
+
+ /* Close objects opened */
+ if(H5Fclose(fid) < 0) {
+ HDfprintf(stderr, "Error closing file!\n");
+ HDexit(1);
+ } /* end if */
+
+ return 0;
+}
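
Editorial note: the heart of addrem_records() above is the cork/extend/write/uncork sequence, where the dataset's object header is corked with H5Odisable_mdc_flushes() so a concurrent SWMR reader can never observe an extended dimension before the raw data behind it has reached the file. The following minimal sketch (not part of the patch) reduces that pattern to a single 1-D dataset; it assumes a pre-existing latest-format file "append.h5" containing a chunked, unlimited, native-int dataset "data" (both names are illustrative).

/* Editorial sketch: the cork/extend/write/uncork SWMR append pattern used by
 * addrem_records(), reduced to one hypothetical 1-D dataset. */
#include "hdf5.h"

static int
append_one(hid_t dsid, int value)
{
    hid_t   file_sid, mem_sid;
    hsize_t dims[1], start[1], count[1] = {1};

    /* Get the current extent of the 1-D dataset */
    if((file_sid = H5Dget_space(dsid)) < 0)
        return -1;
    if(H5Sget_simple_extent_dims(file_sid, dims, NULL) < 0)
        return -1;
    if(H5Sclose(file_sid) < 0)
        return -1;

    /* Cork the object header so readers never see the new extent before
     * the raw data behind it has been written */
    if(H5Odisable_mdc_flushes(dsid) < 0)
        return -1;

    /* Extend by one record and select the new element */
    start[0] = dims[0];
    dims[0]++;
    if(H5Dset_extent(dsid, dims) < 0)
        return -1;
    if((file_sid = H5Dget_space(dsid)) < 0)
        return -1;
    if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
        return -1;

    /* Write the record, then uncork */
    if((mem_sid = H5Screate_simple(1, count, NULL)) < 0)
        return -1;
    if(H5Dwrite(dsid, H5T_NATIVE_INT, mem_sid, file_sid, H5P_DEFAULT, &value) < 0)
        return -1;
    if(H5Oenable_mdc_flushes(dsid) < 0)
        return -1;

    if(H5Sclose(mem_sid) < 0 || H5Sclose(file_sid) < 0)
        return -1;
    return 0;
}

int
main(void)
{
    hid_t fid, dsid;
    int i;

    if((fid = H5Fopen("append.h5", H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, H5P_DEFAULT)) < 0)
        return 1;
    if((dsid = H5Dopen2(fid, "data", H5P_DEFAULT)) < 0)
        return 1;

    for(i = 0; i < 100; i++) {
        if(append_one(dsid, i) < 0)
            return 1;
        if(0 == ((i + 1) % 10))     /* flush every 10 appends */
            if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
                return 1;
    }

    if(H5Dclose(dsid) < 0 || H5Fclose(fid) < 0)
        return 1;
    return 0;
}
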
diff --git a/test/swmr_check_compat_vfd.c b/test/swmr_check_compat_vfd.c
index d1134ee..6876ef5 100644
--- a/test/swmr_check_compat_vfd.c
+++ b/test/swmr_check_compat_vfd.c
@@ -18,12 +18,13 @@
*
* It is intended for use in shell scripts.
*/
+
#include "h5test.h"
/* This file needs to access the file driver testing code */
-#define H5FD_FRIEND /*suppress error about including H5FDpkg */
+#define H5FD_FRIEND /*suppress error about including H5FDpkg */
#define H5FD_TESTING
-#include "H5FDpkg.h" /* File drivers */
+#include "H5FDpkg.h" /* File drivers */
/*-------------------------------------------------------------------------
diff --git a/test/swmr_common.c b/test/swmr_common.c
new file mode 100644
index 0000000..ac17ddd
--- /dev/null
+++ b/test/swmr_common.c
@@ -0,0 +1,290 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_common.c
+ *
+ * Purpose: Utility functions for the SWMR test code.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include "h5test.h"
+#include "swmr_common.h"
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+/* The SWMR data arrays:
+ *
+ * The code uses a 2-D jagged array of datasets. The first dimension is called
+ * the 'level' and there are five of them.
+ *
+ * #define NLEVELS 5
+ *
+ * The second dimension is the 'count' and there are quite a few datasets per
+ * 'level'.
+ *
+ * unsigned symbol_count[NLEVELS] = {100, 200, 400, 800, 1600};
+ *
+ * These datasets are created when the skeleton is generated and are initially
+ * empty. Each dataset has no upper bound on size (H5S_UNLIMITED). They
+ * are of compound type, with two members: an integer ID and an opaque
+ * 'data part'. The data part is not used by the SWMR testing.
+ *
+ * The SWMR testing will then randomly add and/or remove entries
+ * from these datasets. The selection of the level is skewed by a mapping
+ * table which preferentially hammers on the lower levels with their smaller
+ * number of datasets.
+ *
+ * static unsigned symbol_mapping[NMAPPING] = {0, 0, 0, 0, 1, 1, 2, 3, 4};
+ *
+ * The information about each dataset (name, hid_t, etc.) is stored in a
+ * separate array.
+ *
+ * symbol_info_t *symbol_info[NLEVELS];
+ */
+
+/* An array of dataset levels, used to select the level for a SWMR operation
+ * Note that this preferentially selects the lower levels with their smaller
+ * number of datasets.
+ */
+static unsigned symbol_mapping[NMAPPING] = {0, 0, 0, 0, 1, 1, 2, 3, 4};
+
+/* The number of datasets at each level */
+unsigned symbol_count[NLEVELS] = {100, 200, 400, 800, 1600};
+
+/* Array of dataset information entries (1 per dataset) */
+symbol_info_t *symbol_info[NLEVELS];
+
+
+/*-------------------------------------------------------------------------
+ * Function: choose_dataset
+ *
+ * Purpose: Selects a random dataset in the SWMR file
+ *
+ * Parameters: N/A
+ *
+ * Return: Success: A pointer to information about a dataset.
+ * Failure: Can't fail
+ *
+ *-------------------------------------------------------------------------
+ */
+symbol_info_t *
+choose_dataset(void)
+{
+ unsigned level; /* The level of the dataset */
+ unsigned offset; /* The "offset" of the dataset at that level */
+
+ /* Determine level of dataset */
+ level = symbol_mapping[HDrandom() % NMAPPING];
+
+ /* Determine the offset of the level */
+ offset = (unsigned)(HDrandom() % (int)symbol_count[level]);
+
+ return &symbol_info[level][offset];
+} /* end choose_dataset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_symbol_datatype
+ *
+ * Purpose: Creates the HDF5 datatype used for elements in the SWMR
+ * testing datasets.
+ *
+ * Parameters: N/A
+ *
+ * Return: Success: An HDF5 type ID
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+hid_t
+create_symbol_datatype(void)
+{
+ hid_t sym_type_id; /* Datatype ID for symbol */
+ hid_t opaq_type_id; /* Datatype ID for opaque part of record */
+
+ /* Create opaque datatype to represent other information for this record */
+ if((opaq_type_id = H5Tcreate(H5T_OPAQUE, (size_t)DTYPE_SIZE)) < 0)
+ return -1;
+
+ /* Create compound datatype for symbol */
+ if((sym_type_id = H5Tcreate(H5T_COMPOUND, sizeof(symbol_t))) < 0)
+ return -1;
+
+ /* Insert fields in symbol datatype */
+ if(H5Tinsert(sym_type_id, "rec_id", HOFFSET(symbol_t, rec_id), H5T_NATIVE_UINT64) < 0)
+ return -1;
+ if(H5Tinsert(sym_type_id, "info", HOFFSET(symbol_t, info), opaq_type_id) < 0)
+ return -1;
+
+ /* Close opaque datatype */
+ if(H5Tclose(opaq_type_id) < 0)
+ return -1;
+
+ return sym_type_id;
+} /* end create_symbol_datatype() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: generate_name
+ *
+ * Purpose: Generates a SWMR testing dataset name given a level and
+ * count.
+ * The name is in the format <level>-<count> (%u-%04u).
+ *
+ * Parameters: char *name_buf
+ * Buffer for the created name. Must be pre-allocated.
+ * Since the name is formulaic, this isn't considered an issue.
+ *
+ * unsigned level
+ * The dataset's level
+ *
+ * unsigned count
+ * The dataset's count
+ *
+ * Return: Success: 0
+ *
+ * Failure: Can't fail
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+generate_name(char *name_buf, unsigned level, unsigned count)
+{
+ HDassert(name_buf);
+
+ sprintf(name_buf, "%u-%04u", level, count);
+
+ return 0;
+} /* end generate_name() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: generate_symbols
+ *
+ * Purpose: Initializes the global dataset information arrays.
+ *
+ * Parameters: N/A
+ *
+ * Return: Success: 0
+ * Failure: Can't fail
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+generate_symbols(void)
+{
+ unsigned u, v; /* Local index variables */
+
+ for(u = 0; u < NLEVELS; u++) {
+ symbol_info[u] = (symbol_info_t *)HDmalloc(symbol_count[u] * sizeof(symbol_info_t));
+ for(v = 0; v < symbol_count[u]; v++) {
+ char name_buf[64];
+
+ generate_name(name_buf, u, v);
+ symbol_info[u][v].name = (char *)HDmalloc(HDstrlen(name_buf) + 1);
+ HDstrcpy(symbol_info[u][v].name, name_buf);
+ symbol_info[u][v].dsid = -1;
+ symbol_info[u][v].nrecords = 0;
+ } /* end for */
+ } /* end for */
+
+ return 0;
+} /* end generate_symbols() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: shutdown_symbols
+ *
+ * Purpose: Cleans up the global dataset information arrays.
+ *
+ * Parameters: N/A
+ *
+ * Return: Success: 0
+ * Failure: Can't fail
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+shutdown_symbols(void)
+{
+ unsigned u, v; /* Local index variables */
+
+ /* Clean up the symbols */
+ for(u = 0; u < NLEVELS; u++) {
+ for(v = 0; v < symbol_count[u]; v++)
+ HDfree(symbol_info[u][v].name);
+ HDfree(symbol_info[u]);
+ } /* end for */
+
+ return 0;
+} /* end shutdown_symbols() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: print_metadata_retries_info
+ *
+ * Purpose: Retrieves and prints the collection of metadata read retries for the file.
+ *
+ * Parameters: fid: the currently opened file identifier
+ *
+ * Return: Success: 0
+ * Failure: negative
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+print_metadata_retries_info(hid_t fid)
+{
+ H5F_retry_info_t info;
+ unsigned i;
+
+ /* Retrieve the collection of retries */
+ if(H5Fget_metadata_read_retry_info(fid, &info) < 0)
+ return (-1);
+
+ /* Print information for each non-NULL retries[i] */
+ for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++) {
+ unsigned power;
+ unsigned j;
+
+ if(NULL == info.retries[i])
+ continue;
+
+ HDfprintf(stderr, "Metadata read retries for item %u:\n", i);
+ power = 1;
+ for(j = 0; j < info.nbins; j++) {
+ if(info.retries[i][j])
+ HDfprintf(stderr, "\t# of retries for %u - %u retries: %u\n",
+ power, (power * 10) - 1, info.retries[i][j]);
+ power *= 10;
+ } /* end for */
+ } /* end for */
+
+ /* Free memory for each non-NULL retries[i] */
+ for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++)
+ if(info.retries[i] != NULL)
+ H5free_memory(info.retries[i]);
+
+ return 0;
+} /* print_metadata_retries_info() */
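
Editorial note: the symbol_mapping table above gives level 0 a 4/9 chance, level 1 a 2/9 chance, and levels 2-4 a 1/9 chance each, even though the higher levels hold far more datasets, so any individual level-0 dataset is exercised much more often than a level-4 one. The self-contained simulation below (a sketch, not part of the patch) reproduces choose_dataset()'s draw and prints the resulting selection rates.

/* Editorial sketch: simulate the skewed selection performed by
 * choose_dataset() and print how often each level is chosen, and how often
 * any single dataset within that level is hit. */
#include <stdio.h>
#include <stdlib.h>

#define NLEVELS  5
#define NMAPPING 9
#define NDRAWS   900000

static const unsigned symbol_mapping[NMAPPING] = {0, 0, 0, 0, 1, 1, 2, 3, 4};
static const unsigned symbol_count[NLEVELS]    = {100, 200, 400, 800, 1600};

int
main(void)
{
    unsigned long level_hits[NLEVELS] = {0};
    unsigned long i;

    srand(42);      /* fixed seed so the simulation is repeatable */

    /* First step of choose_dataset(): pick a level through the mapping table */
    for(i = 0; i < NDRAWS; i++)
        level_hits[symbol_mapping[rand() % NMAPPING]]++;

    /* Second step picks uniformly within the level, so a single dataset's
     * share is the level's share divided by symbol_count[level] */
    for(i = 0; i < NLEVELS; i++)
        printf("level %lu: chosen %.1f%% of the time; "
               "a given dataset there ~%.4f%% of the time\n",
               i, 100.0 * level_hits[i] / NDRAWS,
               100.0 * level_hits[i] / NDRAWS / symbol_count[i]);

    return 0;
}
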
diff --git a/test/swmr_common.h b/test/swmr_common.h
new file mode 100644
index 0000000..a2cee71
--- /dev/null
+++ b/test/swmr_common.h
@@ -0,0 +1,80 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef _SWMR_COMMON_H
+#define _SWMR_COMMON_H
+
+/***********/
+/* Headers */
+/***********/
+
+#include "h5test.h"
+
+/**********/
+/* Macros */
+/**********/
+
+#define NLEVELS 5 /* # of datasets in the SWMR test file */
+
+#define NMAPPING 9
+
+#define FILENAME "swmr_data.h5" /* SWMR test file name */
+#define DTYPE_SIZE 150 /* Data size in opaque type */
+
+/* The message sent by writer that the file open is done--releasing the file lock */
+#define WRITER_MESSAGE "SWMR_WRITER_MESSAGE"
+
+/************/
+/* Typedefs */
+/************/
+
+/* Information about a symbol/dataset */
+typedef struct {
+ char *name; /* Dataset name for symbol */
+ hid_t dsid; /* Dataset ID for symbol */
+ hsize_t nrecords; /* Number of records for the symbol */
+} symbol_info_t;
+
+/* A symbol's record */
+typedef struct {
+ uint64_t rec_id; /* ID for this record (unique in symbol) */
+ uint8_t info[DTYPE_SIZE]; /* "Other" information for this record */
+} symbol_t;
+
+/********************/
+/* Global Variables */
+/********************/
+H5TEST_DLLVAR symbol_info_t *symbol_info[NLEVELS];
+H5TEST_DLLVAR unsigned symbol_count[NLEVELS];
+
+/**************/
+/* Prototypes */
+/**************/
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+H5TEST_DLL symbol_info_t * choose_dataset(void);
+H5TEST_DLL hid_t create_symbol_datatype(void);
+H5TEST_DLL int generate_name(char *name_buf, unsigned level, unsigned count);
+H5TEST_DLL int generate_symbols(void);
+H5TEST_DLL int shutdown_symbols(void);
+H5TEST_DLL int print_metadata_retries_info(hid_t fid);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SWMR_COMMON_H */
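
Editorial note: taken together, the prototypes above define the life cycle every SWMR tool in this patch follows: build the global symbol tables, pick datasets, build the record datatype, and tear everything down. A minimal sketch of that calling sequence follows; it assumes compilation and linking against swmr_common.c and the HDF5 test library, exactly as the tools in this patch are built.

/* Editorial sketch: the intended calling sequence for the swmr_common.h
 * helpers, without any file I/O. */
#include "h5test.h"
#include "swmr_common.h"

int
main(void)
{
    symbol_info_t *sym;         /* Randomly chosen dataset description */
    hid_t tid;                  /* Datatype for symbol_t records */
    char name[64];              /* Buffer for a generated dataset name */

    /* Build the global symbol_info[][] arrays (names only, no HDF5 objects) */
    if(generate_symbols() < 0)
        return 1;

    /* Names follow the "<level>-<count>" pattern, e.g. "0-0042" */
    generate_name(name, 0, 42);
    HDfprintf(stdout, "Dataset 42 at level 0 is named '%s'\n", name);

    /* Pick a dataset with the skewed level distribution */
    sym = choose_dataset();
    HDfprintf(stdout, "Chose dataset '%s' (%llu records recorded so far)\n",
              sym->name, (unsigned long long)sym->nrecords);

    /* Create, then release, the compound {rec_id, info} record type */
    if((tid = create_symbol_datatype()) < 0)
        return 1;
    if(H5Tclose(tid) < 0)
        return 1;

    /* Release the global arrays */
    if(shutdown_symbols() < 0)
        return 1;

    return 0;
}
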
diff --git a/test/swmr_generator.c b/test/swmr_generator.c
new file mode 100644
index 0000000..a87879f
--- /dev/null
+++ b/test/swmr_generator.c
@@ -0,0 +1,386 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_generator.c
+ *
+ * Purpose: Functions for building and setting up the SWMR test file
+ * and datasets.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include "h5test.h"
+#include "swmr_common.h"
+
+/*
+ * This file needs to access testing code from the H5O package.
+ */
+#define H5O_FRIEND /*suppress error about including H5Opkg */
+#define H5O_TESTING
+#include "H5Opkg.h" /* Object headers */
+
+
+/****************/
+/* Local Macros */
+/****************/
+
+#define CHUNK_SIZE 50 /* Chunk size for created datasets */
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static int gen_skeleton(const char *filename, hbool_t verbose,
+ hbool_t swmr_write, int comp_level, const char *index_type,
+ unsigned random_seed);
+static void usage(void);
+
+
+/*-------------------------------------------------------------------------
+ * Function: gen_skeleton
+ *
+ * Purpose: Creates the HDF5 file and datasets which will be used in
+ * the SWMR testing.
+ *
+ * Parameters: const char *filename
+ * The SWMR test file's name.
+ *
+ * hbool_t verbose
+ * Whether verbose console output is desired.
+ *
+ * hbool_t swmr_write
+ * Whether to create the file with SWMR writing enabled
+ *
+ * int comp_level
+ * The zlib compression level to use. -1 = no compression.
+ *
+ * const char *index_type
+ * The chunk index type (b1 | b2 | ea | fa)
+ *
+ * unsigned random_seed
+ * The random seed to store in the file. The sparse tests use
+ * this value.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+gen_skeleton(const char *filename, hbool_t verbose, hbool_t swmr_write,
+ int comp_level, const char *index_type, unsigned random_seed)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t fcpl; /* File creation property list */
+ hid_t fapl; /* File access property list */
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t tid; /* Datatype for dataset elements */
+ hid_t sid; /* Dataspace ID */
+ hid_t aid; /* Attribute ID */
+ hsize_t dims[2] = {1, 0}; /* Dataset starting dimensions */
+ hsize_t max_dims[2] = {1, H5S_UNLIMITED}; /* Dataset maximum dimensions */
+ hsize_t chunk_dims[2] = {1, CHUNK_SIZE}; /* Chunk dimensions */
+#ifdef FILLVAL_WORKS
+ symbol_t fillval; /* Dataset fill value */
+#endif /* FILLVAL_WORKS */
+ unsigned u, v; /* Local index variable */
+
+ HDassert(filename);
+ HDassert(index_type);
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ /* Can create a file for SWMR support with: (a) (write+latest-format) or (b) (SWMR write+non-latest-format) */
+ if(!swmr_write) {
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+ }
+
+ /* There are two chunk indexes tested here.
+ * With one unlimited dimension, we get the extensible array index
+ * type; with two unlimited dimensions, we get a v2 B-tree index.
+ */
+ if(!HDstrcmp(index_type, "b2"))
+ max_dims[0] = H5S_UNLIMITED;
+
+#ifdef QAK
+ /* Increase the initial size of the metadata cache */
+ {
+ H5AC_cache_config_t mdc_config;
+
+ mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ H5Pget_mdc_config(fapl, &mdc_config);
+ HDfprintf(stderr, "mdc_config.initial_size = %lu\n", (unsigned long)mdc_config.initial_size);
+ HDfprintf(stderr, "mdc_config.epoch_length = %lu\n", (unsigned long)mdc_config.epoch_length);
+ mdc_config.set_initial_size = 1;
+ mdc_config.initial_size = 16 * 1024 * 1024;
+ /* mdc_config.epoch_length = 5000; */
+ H5Pset_mdc_config(fapl, &mdc_config);
+ }
+#endif /* QAK */
+
+#ifdef QAK
+ H5Pset_small_data_block_size(fapl, (hsize_t)(50 * CHUNK_SIZE * DTYPE_SIZE));
+#endif /* QAK */
+
+#ifdef QAK
+ H5Pset_fapl_log(fapl, "append.log", H5FD_LOG_ALL, (size_t)(512 * 1024 * 1024));
+#endif /* QAK */
+
+ /* Create file creation property list */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ return -1;
+
+#ifdef QAK
+ H5Pset_link_phase_change(fcpl, 0, 0);
+#endif /* QAK */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Creating file\n");
+
+ /* Create the file */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC | (swmr_write ? H5F_ACC_SWMR_WRITE : 0), fcpl, fapl)) < 0)
+ return -1;
+
+ /* Close file creation property list */
+ if(H5Pclose(fcpl) < 0)
+ return -1;
+
+ /* Close file access property list */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ /* Create attribute with (shared) random number seed - for sparse test */
+ if((sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+ if((aid = H5Acreate2(fid, "seed", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ return -1;
+ if(H5Awrite(aid, H5T_NATIVE_UINT, &random_seed) < 0)
+ return -1;
+ if(H5Sclose(sid) < 0)
+ return -1;
+
+ /* Create datatype for creating datasets */
+ if((tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Create dataspace for creating datasets */
+ if((sid = H5Screate_simple(2, dims, max_dims)) < 0)
+ return -1;
+
+ /* Create dataset creation property list */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ return -1;
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+ return -1;
+ if(comp_level >= 0) {
+ if(H5Pset_deflate(dcpl, (unsigned)comp_level) < 0)
+ return -1;
+ } /* end if */
+#ifdef FILLVAL_WORKS
+ /* Currently fill values do not work because they can bump the dataspace
+ * message to the second object header chunk. We should enable the fillval
+ * here when this is fixed. -NAF 8/11/11 */
+ HDmemset(&fillval, 0, sizeof(fillval));
+ fillval.rec_id = (uint64_t)ULLONG_MAX;
+ if(H5Pset_fill_value(dcpl, tid, &fillval) < 0)
+ return -1;
+#endif /* FILLVAL_WORKS */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Creating datasets\n");
+
+ /* Create the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+ hid_t dsid; /* Dataset ID */
+ char name_buf[64];
+ hbool_t move_dataspace_message = FALSE; /* Whether to move the dataspace message out of object header chunk #0 */
+
+ generate_name(name_buf, u, v);
+ if((dsid = H5Dcreate2(fid, name_buf, tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ return -1;
+
+ /* Determine if the dataspace message for this dataset should be
+ * moved out of chunk #0 of the object header
+ * (Set to TRUE for every fourth dataset)
+ */
+ move_dataspace_message = !(HDrandom() % 4);
+ if(move_dataspace_message) {
+ unsigned chunk_num; /* Object header chunk # for dataspace message */
+
+ /* Move the dataspace message to a new object header chunk */
+ if(H5O_msg_move_to_new_chunk_test(dsid, H5O_SDSPACE_ID) < 0)
+ return -1;
+
+ /* Retrieve the chunk # for the dataspace message */
+ chunk_num = UINT_MAX;
+ if(H5O_msg_get_chunkno_test(dsid, H5O_SDSPACE_ID, &chunk_num) < 0)
+ return -1;
+ /* Should not be in chunk #0 for now */
+ if(0 == chunk_num)
+ return -1;
+ } /* end if */
+
+ if(H5Dclose(dsid) < 0)
+ return -1;
+ } /* end for */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Closing objects\n");
+
+ /* Close everything */
+ if(H5Pclose(dcpl) < 0)
+ return -1;
+ if(H5Sclose(sid) < 0)
+ return -1;
+ if(H5Tclose(tid) < 0)
+ return -1;
+ if(H5Fclose(fid) < 0)
+ return -1;
+
+ return 0;
+} /* end gen_skeleton() */
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_generator [-q] [-s] [-c <deflate compression level>]\n");
+ printf(" [-i <index type>] [-r <random seed>]\n");
+ printf("\n");
+ printf("NOTE: The random seed option is only used by the sparse test. Other\n");
+ printf(" tests specify the random seed as a reader/writer option.\n");
+ printf("\n");
+ printf("<deflate compression level> should be -1 (for no compression) or 0-9\n");
+ printf("\n");
+ printf("<index type> should be b2 or ea\n");
+ printf("\n");
+ printf("Defaults to verbose (no '-q' given), no SWMR_WRITE mode (no '-s' given), no\n");
+ printf("compression ('-c -1'), v1 b-tree indexing (-i b1), and will generate a random\n");
+ printf("seed (no -r given).\n");
+ printf("\n");
+ HDexit(1);
+} /* end usage() */
+
+int main(int argc, const char *argv[])
+{
+ int comp_level = -1; /* Compression level (-1 is no compression) */
+ hbool_t verbose = TRUE; /* Whether to emit some informational messages */
+ hbool_t swmr_write = FALSE; /* Whether to create file with SWMR_WRITE access */
+ const char *index_type = "b1"; /* Chunk index type */
+ hbool_t use_seed = FALSE; /* Set to TRUE if a seed was set on the command line */
+ unsigned random_seed = 0; /* Random # seed */
+ unsigned u; /* Local index variables */
+ int temp;
+
+ /* Parse command line options */
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* Compress dataset chunks */
+ case 'c':
+ comp_level = HDatoi(argv[u + 1]);
+ if(comp_level < -1 || comp_level > 9)
+ usage();
+ u += 2;
+ break;
+
+ /* Chunk index type */
+ case 'i':
+ index_type = argv[u + 1];
+ if(HDstrcmp(index_type, "ea")
+ && HDstrcmp(index_type, "b2"))
+ usage();
+ u += 2;
+ break;
+
+ /* Random # seed */
+ case 'r':
+ use_seed = TRUE;
+ temp = HDatoi(argv[u + 1]);
+ if(temp < 0)
+ usage();
+ else
+ random_seed = (unsigned)temp;
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = FALSE;
+ u++;
+ break;
+
+ /* Run with SWMR_WRITE */
+ case 's':
+ swmr_write = TRUE;
+ u++;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ } /* end while */
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose) {
+ HDfprintf(stderr, "Parameters:\n");
+ HDfprintf(stderr, "\tswmr writes %s\n", swmr_write ? "on" : "off");
+ HDfprintf(stderr, "\tcompression level = %d\n", comp_level);
+ HDfprintf(stderr, "\tindex type = %s\n", index_type);
+ } /* end if */
+
+ /* Set the random seed */
+ if(!use_seed) {
+ struct timeval t;
+
+ HDgettimeofday(&t, NULL);
+ random_seed = (unsigned)(t.tv_usec);
+ } /* end if */
+ HDsrandom(random_seed);
+ /* ALWAYS emit the random seed for possible debugging */
+ HDfprintf(stderr, "Using generator random seed (used in sparse test only): %u\n", random_seed);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Generating skeleton file: %s\n", FILENAME);
+
+ /* Generate file skeleton */
+ if(gen_skeleton(FILENAME, verbose, swmr_write, comp_level, index_type, random_seed) < 0) {
+ HDfprintf(stderr, "Error generating skeleton file!\n");
+ HDexit(1);
+ } /* end if */
+
+ return 0;
+}
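
Editorial note: gen_skeleton() stores the generator's random seed in a scalar "seed" attribute on the root group so that the sparse reader and writer can share it. The short sketch below (not part of the patch) shows how a consumer might read that attribute back; it assumes the skeleton file swmr_data.h5 already exists and is not currently locked by a SWMR writer.

/* Editorial sketch: recover the shared random seed that gen_skeleton()
 * attaches to the root group of the skeleton file. */
#include <stdio.h>
#include "hdf5.h"

int
main(void)
{
    hid_t fid, aid;
    unsigned seed = 0;

    if((fid = H5Fopen("swmr_data.h5", H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
        return 1;

    /* The attribute lives on the root group and is a scalar native unsigned */
    if((aid = H5Aopen(fid, "seed", H5P_DEFAULT)) < 0)
        return 1;
    if(H5Aread(aid, H5T_NATIVE_UINT, &seed) < 0)
        return 1;

    printf("Skeleton file was generated with random seed %u\n", seed);

    if(H5Aclose(aid) < 0 || H5Fclose(fid) < 0)
        return 1;
    return 0;
}
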
diff --git a/test/swmr_reader.c b/test/swmr_reader.c
new file mode 100644
index 0000000..af4a450
--- /dev/null
+++ b/test/swmr_reader.c
@@ -0,0 +1,549 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_reader.c
+ *
+ * Purpose: Reads data from a randomly selected subset of the datasets
+ * in the SWMR test file.
+ *
+ * This program is intended to run concurrently with the
+ * swmr_writer program.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include "h5test.h"
+#include "swmr_common.h"
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static int check_dataset(hid_t fid, hbool_t verbose, FILE *verbose_file,
+ const char *sym_name, symbol_t *record, hid_t rec_sid);
+static int read_records(const char *filename, hbool_t verbose, FILE *verbose_file,
+ unsigned random_seed, unsigned long nseconds, unsigned poll_time,
+ unsigned ncommon, unsigned nrandom);
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+static hid_t symbol_tid = -1; /* The type ID for the SWMR datasets */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_dataset
+ *
+ * Purpose: For a given dataset, checks to make sure that the stated
+ * and actual sizes are the same. If they are not, then
+ * we have an inconsistent dataset due to a SWMR error.
+ *
+ * Parameters: hid_t fid
+ * The SWMR test file's ID.
+ *
+ * hbool_t verbose
+ * Whether verbose console output is desired.
+ *
+ * FILE *verbose_file
+ * File handle for verbose output
+ *
+ * const char *sym_name
+ * The name of the dataset from which to read.
+ *
+ * symbol_t *record
+ * Memory for the record. Must be pre-allocated.
+ *
+ * hid_t rec_sid
+ * The memory dataspace for access. It's always the same so
+ * there is no need to re-create it every time this function
+ * is called.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+check_dataset(hid_t fid, hbool_t verbose, FILE *verbose_file,
+ const char *sym_name, symbol_t *record, hid_t rec_sid)
+{
+ hid_t dsid; /* Dataset ID */
+ hid_t file_sid; /* Dataset's space ID */
+ hssize_t snpoints; /* Number of elements in dataset */
+ hsize_t start[2] = {0, 0}, count[2] = {1, 1}; /* Hyperslab selection values */
+
+ HDassert(fid >= 0);
+ HDassert(sym_name);
+ HDassert(record);
+ HDassert(rec_sid >= 0);
+
+ /* Open dataset for symbol */
+ if((dsid = H5Dopen2(fid, sym_name, H5P_DEFAULT)) < 0)
+ return -1;
+
+ /* Get the dataset's dataspace */
+ if((file_sid = H5Dget_space(dsid)) < 0)
+ return -1;
+
+ /* Get the number of elements (= records, for 1-D datasets) */
+ if((snpoints = H5Sget_simple_extent_npoints(file_sid)) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Symbol = '%s', # of records = %lld\n", sym_name, (long long)snpoints);
+
+ /* Check if there are records for symbol */
+ if(snpoints > 0) {
+ /* Choose the last record in the dataset */
+ start[1] = (hsize_t)(snpoints - 1);
+ if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ return -1;
+
+ /* Read record from dataset */
+ record->rec_id = (uint64_t)ULLONG_MAX;
+ if(H5Dread(dsid, symbol_tid, rec_sid, file_sid, H5P_DEFAULT, record) < 0)
+ return -1;
+
+ /* Verify record value */
+ if(record->rec_id != start[1]) {
+ struct timeval tv;
+
+ HDgettimeofday(&tv, NULL);
+
+ if(verbose) {
+ HDfprintf(verbose_file, "*** ERROR ***\n");
+ HDfprintf(verbose_file, "Incorrect record value!\n");
+ HDfprintf(verbose_file, "Time = %llu.%llu, Symbol = '%s', # of records = %lld, record->rec_id = %llu\n", (unsigned long long)tv.tv_sec, (unsigned long long)tv.tv_usec, sym_name, (long long)snpoints, (unsigned long long)record->rec_id);
+ } /* end if */
+ return -1;
+ } /* end if */
+ } /* end if */
+
+ /* Close the dataset's dataspace */
+ if(H5Sclose(file_sid) < 0)
+ return -1;
+
+ /* Close dataset for symbol */
+ if(H5Dclose(dsid) < 0)
+ return -1;
+
+ return 0;
+} /* end check_dataset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: read_records
+ *
+ * Purpose: Repeatedly opens the SWMR test file and verifies the records
+ * in a selection of its datasets, for a given amount of time.
+ *
+ * The "common" datasets are a random selection from among
+ * the level 0 datasets. The "random" datasets are a random
+ * selection from among all the file's datasets. This scheme
+ * ensures that the level 0 datasets are interrogated vigorously.
+ *
+ * Parameters: const char *filename
+ * The SWMR test file's name.
+ *
+ * hbool_t verbose
+ * Whether verbose console output is desired.
+ *
+ * FILE *verbose_file
+ * File handle for verbose output
+ *
+ * unsigned random_seed
+ * Random seed for the file (used for verbose logging)
+ *
+ * unsigned long nseconds
+ * The amount of time to read records, in seconds.
+ *
+ * unsigned poll_time
+ * The amount of time to sleep (s).
+ *
+ * unsigned ncommon
+ * The number of common/non-random datasets that will be opened.
+ *
+ * unsigned nrandom
+ * The number of random datasets that will be opened.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+read_records(const char *filename, hbool_t verbose, FILE *verbose_file,
+ unsigned random_seed, unsigned long nseconds, unsigned poll_time,
+ unsigned ncommon, unsigned nrandom)
+{
+ time_t start_time; /* Starting time */
+ time_t curr_time; /* Current time */
+ symbol_info_t **sym_com = NULL; /* Pointers to array of common dataset IDs */
+ symbol_info_t **sym_rand = NULL; /* Pointers to array of random dataset IDs */
+ hid_t mem_sid; /* Memory dataspace ID */
+ hid_t fid; /* SWMR test file ID */
+ hid_t fapl; /* file access property list */
+ symbol_t record; /* The record to read from the dataset */
+ unsigned read_attempts; /* The number of read attempts for metadata */
+ unsigned v; /* Local index variable */
+
+ HDassert(filename);
+ HDassert(nseconds != 0);
+ HDassert(poll_time != 0);
+
+ /* Reset the record */
+ /* (record's 'info' field might need to change for each record read, also) */
+ HDmemset(&record, 0, sizeof(record));
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Choosing datasets\n");
+
+ /* Allocate space for 'common' datasets, if any */
+ if(ncommon > 0) {
+ /* Allocate array to hold pointers to symbols for common datasets */
+ if(NULL == (sym_com = (symbol_info_t **)HDmalloc(sizeof(symbol_info_t *) * ncommon)))
+ return -1;
+
+ /* Open the common datasets */
+ for(v = 0; v < ncommon; v++) {
+ unsigned offset; /* Offset of symbol to use */
+
+ /* Determine the offset of the symbol, within level 0 symbols */
+ /* (level 0 symbols are the most common symbols) */
+ offset = (unsigned)((unsigned)HDrandom() % symbol_count[0]);
+ sym_com[v] = &symbol_info[0][offset];
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Common symbol #%u = '%s'\n", v, symbol_info[0][offset].name);
+ } /* end for */
+ } /* end if */
+
+ /* Allocate space for 'random' datasets, if any */
+ if(nrandom > 0) {
+ /* Allocate array to hold pointers to symbols for random datasets */
+ if(NULL == (sym_rand = (symbol_info_t **)HDmalloc(sizeof(symbol_info_t *) * nrandom)))
+ return -1;
+
+ /* Determine the random datasets */
+ for(v = 0; v < nrandom; v++) {
+ symbol_info_t *sym; /* Symbol to use */
+
+ /* Determine the symbol, within all symbols */
+ if(NULL == (sym = choose_dataset()))
+ return -1;
+ sym_rand[v] = sym;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Random symbol #%u = '%s'\n", v, sym->name);
+ } /* end for */
+ } /* end if */
+
+ /* Create a dataspace for the record to read */
+ if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Reading records\n");
+
+ /* Get the starting time */
+ start_time = HDtime(NULL);
+ curr_time = start_time;
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ /* Log I/O when verbose output is enabled */
+ if(verbose) {
+ char verbose_name[1024];
+
+ HDsnprintf(verbose_name, sizeof(verbose_name), "swmr_reader.log.%u", random_seed);
+
+ H5Pset_fapl_log(fapl, verbose_name, H5FD_LOG_ALL, (size_t)(512 * 1024 * 1024));
+ } /* end if */
+
+
+ /* Loop over reading records until [at least] the correct # of seconds have passed */
+ while(curr_time < (time_t)(start_time + (time_t)nseconds)) {
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Opening file: %s\n", filename);
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl)) < 0)
+ return -1;
+
+ /* Check 'common' datasets, if any */
+ if(ncommon > 0) {
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Checking common symbols\n");
+
+ /* Iterate over common datasets */
+ for(v = 0; v < ncommon; v++) {
+ /* Check common dataset */
+ if(check_dataset(fid, verbose, verbose_file, sym_com[v]->name, &record, mem_sid) < 0)
+ return -1;
+ HDmemset(&record, 0, sizeof(record));
+ } /* end for */
+ } /* end if */
+
+ /* Check 'random' datasets, if any */
+ if(nrandom > 0) {
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Checking random symbols\n");
+
+ /* Iterate over random datasets */
+ for(v = 0; v < nrandom; v++) {
+ /* Check random dataset */
+ if(check_dataset(fid, verbose, verbose_file, sym_rand[v]->name, &record, mem_sid) < 0)
+ return -1;
+ HDmemset(&record, 0, sizeof(record));
+ } /* end for */
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Closing file\n");
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ return -1;
+
+ /* Sleep for the appropriate # of seconds */
+ HDsleep(poll_time);
+
+ /* Retrieve the current time */
+ curr_time = HDtime(NULL);
+ } /* end while */
+
+ /* Close the memory dataspace */
+ if(H5Sclose(mem_sid) < 0)
+ return -1;
+
+ /* Close the fapl */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Closing datasets\n");
+
+ /* Close 'random' datasets, if any */
+ if(nrandom > 0) {
+ /* Release array holding dataset ID's for random datasets */
+ HDfree(sym_rand);
+ } /* end if */
+
+ /* Close 'common' datasets, if any */
+ if(ncommon > 0) {
+ /* Release array holding dataset ID's for common datasets */
+ HDfree(sym_com);
+ } /* end if */
+
+ return 0;
+} /* end read_records() */
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_reader [-q] [-s <# of seconds to sleep between polling>]\n");
+ printf(" [-h <# of common symbols to poll>] [-l <# of random symbols to poll>]\n");
+ printf(" [-r <random seed>] <# of seconds to test>\n");
+ printf("\n");
+ printf("<# of seconds to test> must be specified.\n");
+ printf("\n");
+ printf("Defaults to verbose (no '-q' given), 1 second between polling ('-s 1'),\n");
+ printf("5 common symbols to poll ('-h 5'), 10 random symbols to poll ('-l 10'),\n");
+ printf("and will generate a random seed (no -r given).\n");
+ printf("\n");
+ HDexit(1);
+}
+
+int main(int argc, const char *argv[])
+{
+ long nseconds = 0; /* # of seconds to test */
+ int poll_time = 1; /* # of seconds between polling */
+ int ncommon = 5; /* # of common symbols to poll */
+ int nrandom = 10; /* # of random symbols to poll */
+ hbool_t verbose = TRUE; /* Whether to emit some informational messages */
+ FILE *verbose_file = NULL; /* File handle for verbose output */
+ hbool_t use_seed = FALSE; /* Set to 1 if a seed was set on the command line */
+ unsigned random_seed = 0; /* Random # seed */
+ unsigned u; /* Local index variables */
+ int temp;
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of common symbols to poll */
+ case 'h':
+ ncommon = HDatoi(argv[u + 1]);
+ if(ncommon < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* # of random symbols to poll */
+ case 'l':
+ nrandom = HDatoi(argv[u + 1]);
+ if(nrandom < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = FALSE;
+ u++;
+ break;
+
+ /* Random # seed */
+ case 'r':
+ use_seed = TRUE;
+ temp = HDatoi(argv[u + 1]);
+ if(temp < 0)
+ usage();
+ else
+ random_seed = (unsigned)temp;
+ u += 2;
+ break;
+
+ /* # of seconds between polling */
+ case 's':
+ poll_time = HDatoi(argv[u + 1]);
+ if(poll_time < 0)
+ usage();
+ u += 2;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of seconds to test */
+ nseconds = HDatol(argv[u]);
+ if(nseconds <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+ if(nseconds <= 0)
+ usage();
+ if(poll_time >= nseconds)
+ usage();
+
+ /* Set the random seed */
+ if(!use_seed) {
+ struct timeval t;
+
+ HDgettimeofday(&t, NULL);
+ random_seed = (unsigned)(t.tv_usec);
+ } /* end if */
+ HDsrandom(random_seed);
+
+ /* Open output file */
+ if(verbose) {
+ char verbose_name[1024];
+
+ HDsnprintf(verbose_name, sizeof(verbose_name), "swmr_reader.out.%u", random_seed);
+ if(NULL == (verbose_file = HDfopen(verbose_name, "w"))) {
+ HDfprintf(stderr, "Can't open verbose output file!\n");
+ HDexit(1);
+ }
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose) {
+ HDfprintf(verbose_file, "Parameters:\n");
+ HDfprintf(verbose_file, "\t# of seconds between polling = %d\n", poll_time);
+ HDfprintf(verbose_file, "\t# of common symbols to poll = %d\n", ncommon);
+ HDfprintf(verbose_file, "\t# of random symbols to poll = %d\n", nrandom);
+ HDfprintf(verbose_file, "\t# of seconds to test = %ld\n", nseconds);
+ } /* end if */
+
+ /* ALWAYS emit the random seed for possible debugging */
+ HDfprintf(stdout, "Using reader random seed: %u\n", random_seed);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0) {
+ HDfprintf(stderr, "Error generating symbol names!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Create datatype for creating datasets */
+ if((symbol_tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Reading records from datasets */
+ if(read_records(FILENAME, verbose, verbose_file, random_seed, (unsigned long)nseconds, (unsigned)poll_time, (unsigned)ncommon, (unsigned)nrandom) < 0) {
+ HDfprintf(stderr, "Error reading records from datasets (random_seed = %u)!\n", random_seed);
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ HDfprintf(stderr, "Error releasing symbols!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Closing objects\n");
+
+ /* Close objects created */
+ if(H5Tclose(symbol_tid) < 0) {
+ HDfprintf(stderr, "Error closing symbol datatype!\n");
+ HDexit(1);
+ } /* end if */
+
+ return 0;
+}
+
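Editorial note: read_records() picks up the writer's progress by closing and re-opening the file on every polling pass. For comparison only, the sketch below shows another common SWMR read pattern that this reader does not use: keep the file and one dataset open and call H5Drefresh() each pass to pull in newly flushed metadata. The file name, the dataset name "0-0000" (level 0, count 0, as produced by the generator), and the fixed 10-pass loop are illustrative assumptions.

/* Editorial sketch: a polling SWMR reader that keeps the file and one
 * dataset open and refreshes it, instead of re-opening the file each pass. */
#include <stdio.h>
#include <unistd.h>
#include "hdf5.h"

int
main(void)
{
    hid_t fid, dsid, sid;
    hssize_t npoints;
    int pass;

    if((fid = H5Fopen("swmr_data.h5", H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, H5P_DEFAULT)) < 0)
        return 1;
    if((dsid = H5Dopen2(fid, "0-0000", H5P_DEFAULT)) < 0)
        return 1;

    for(pass = 0; pass < 10; pass++) {
        /* Pull in any metadata the writer has flushed since the last pass */
        if(H5Drefresh(dsid) < 0)
            return 1;

        /* The extent (and therefore the record count) may have grown */
        if((sid = H5Dget_space(dsid)) < 0)
            return 1;
        npoints = H5Sget_simple_extent_npoints(sid);
        printf("pass %d: %lld records visible\n", pass, (long long)npoints);
        if(H5Sclose(sid) < 0)
            return 1;

        sleep(1);   /* poll once per second */
    }

    if(H5Dclose(dsid) < 0 || H5Fclose(fid) < 0)
        return 1;
    return 0;
}
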
diff --git a/test/swmr_remove_reader.c b/test/swmr_remove_reader.c
new file mode 100644
index 0000000..689b010
--- /dev/null
+++ b/test/swmr_remove_reader.c
@@ -0,0 +1,519 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_remove_reader.c
+ *
+ * Purpose: Reads data from a randomly selected subset of the datasets
+ * in the SWMR test file. Unlike the regular reader, these
+ * datasets will be shrinking.
+ *
+ * This program is intended to run concurrently with the
+ * swmr_remove_writer program.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include "h5test.h"
+#include "swmr_common.h"
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+static hid_t symbol_tid = -1;
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static int check_dataset(hid_t fid, unsigned verbose, const char *sym_name,
+ symbol_t *record, hid_t rec_sid);
+static int read_records(const char *filename, unsigned verbose, unsigned long nseconds,
+ unsigned poll_time, unsigned ncommon, unsigned nrandom);
+static void usage(void);
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_dataset
+ *
+ * Purpose: For a given dataset, checks to make sure that the stated
+ * and actual sizes are the same. If they are not, then
+ * we have an inconsistent dataset due to a SWMR error.
+ *
+ * Parameters: hid_t fid
+ * The SWMR test file's ID.
+ *
+ * unsigned verbose
+ * Whether verbose console output is desired.
+ *
+ * const char *sym_name
+ * The name of the dataset from which to read.
+ *
+ * symbol_t *record
+ * Memory for the record. Must be pre-allocated.
+ *
+ * hid_t rec_sid
+ * The memory dataspace for access. It's always the same so
+ * there is no need to re-create it every time this function
+ * is called.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+check_dataset(hid_t fid, unsigned verbose, const char *sym_name, symbol_t *record,
+ hid_t rec_sid)
+{
+ hid_t dsid; /* Dataset ID */
+ hid_t file_sid; /* Dataset's space ID */
+ hssize_t snpoints; /* Number of elements in dataset */
+ hsize_t start[2] = {0, 0}, count[2] = {1, 1}; /* Hyperslab selection values */
+
+ HDassert(fid >= 0);
+ HDassert(sym_name);
+ HDassert(record);
+ HDassert(rec_sid >= 0);
+
+ /* Open dataset for symbol */
+ if((dsid = H5Dopen2(fid, sym_name, H5P_DEFAULT)) < 0)
+ return -1;
+
+ /* Get the dataset's dataspace */
+ if((file_sid = H5Dget_space(dsid)) < 0)
+ return -1;
+
+ /* Get the number of elements (= records, for 1-D datasets) */
+ if((snpoints = H5Sget_simple_extent_npoints(file_sid)) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Symbol = '%s', # of records = %lld\n", sym_name, (long long)snpoints);
+
+ /* Check if there are records for symbol */
+ if(snpoints > 0) {
+ /* Choose a random record in the dataset, choosing the last record half
+ * the time */
+ start[1] = (hsize_t)(HDrandom() % (snpoints * 2));
+ if(start[1] > (hsize_t)(snpoints - 1))
+ start[1] = (hsize_t)(snpoints - 1);
+ if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ return -1;
+
+ /* Read record from dataset */
+#ifdef FILLVAL_WORKS
+ /* When shrinking the dataset, we cannot guarantee that the buffer will
+ * even be touched, unless there is a fill value. Since fill values do
+ * not work with SWMR currently (see note in swmr_generator.c), we
+ * simply initialize rec_id to 0. */
+ record->rec_id = (uint64_t)ULLONG_MAX - 1;
+#else /* FILLVAL_WORKS */
+ record->rec_id = (uint64_t)0;
+#endif /* FILLVAL_WORKS */
+ if(H5Dread(dsid, symbol_tid, rec_sid, file_sid, H5P_DEFAULT, record) < 0)
+ return -1;
+
+ /* Verify record value - note that it may be the fill value, because the
+ * chunk may be deleted before the object header has the updated
+ * dimensions */
+ if(record->rec_id != start[1] && record->rec_id != (uint64_t)0) {
+ HDfprintf(stderr, "*** ERROR ***\n");
+ HDfprintf(stderr, "Incorrect record value!\n");
+ HDfprintf(stderr, "Symbol = '%s', # of records = %lld, record->rec_id = %llx\n", sym_name, (long long)snpoints, (unsigned long long)record->rec_id);
+ return -1;
+ } /* end if */
+ } /* end if */
+
+ /* Close the dataset's dataspace */
+ if(H5Sclose(file_sid) < 0)
+ return -1;
+
+ /* Close dataset for symbol */
+ if(H5Dclose(dsid) < 0)
+ return -1;
+
+ return 0;
+} /* end check_dataset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: read_records
+ *
+ * Purpose: Repeatedly opens the SWMR test file and verifies the records
+ * in a selection of its (shrinking) datasets, for a given amount of time.
+ *
+ * The "common" datasets are a random selection from among
+ * the level 0 datasets. The "random" datasets are a random
+ * selection from among all the file's datasets. This scheme
+ * ensures that the level 0 datasets are interrogated vigorously.
+ *
+ * Parameters: const char *filename
+ * The SWMR test file's name.
+ *
+ * unsigned verbose
+ * Whether verbose console output is desired.
+ *
+ * unsigned long nseconds
+ * The amount of time to read records, in seconds.
+ *
+ * unsigned poll_time
+ * The amount of time to sleep (s).
+ *
+ * unsigned ncommon
+ * The number of common/non-random datasets that will be opened.
+ *
+ * unsigned nrandom
+ * The number of random datasets that will be opened.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+read_records(const char *filename, unsigned verbose, unsigned long nseconds,
+ unsigned poll_time, unsigned ncommon, unsigned nrandom)
+{
+ time_t start_time; /* Starting time */
+ time_t curr_time; /* Current time */
+ symbol_info_t **sym_com = NULL; /* Pointers to array of common dataset IDs */
+ symbol_info_t **sym_rand = NULL; /* Pointers to array of random dataset IDs */
+ hid_t mem_sid; /* Memory dataspace ID */
+ hid_t fid; /* SWMR test file ID */
+ hid_t fapl; /* File access property list */
+ symbol_t record; /* The record to add to the dataset */
+ unsigned v; /* Local index variable */
+
+ HDassert(filename);
+ HDassert(nseconds != 0);
+ HDassert(poll_time != 0);
+
+ /* Reset the record */
+ /* (record's 'info' field might need to change for each record written, also) */
+ HDmemset(&record, 0, sizeof(record));
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Choosing datasets\n");
+
+ /* Allocate space for 'common' datasets, if any */
+ if(ncommon > 0) {
+ /* Allocate array to hold pointers to symbols for common datasets */
+ if(NULL == (sym_com = (symbol_info_t **)HDmalloc(sizeof(symbol_info_t *) * ncommon)))
+ return -1;
+
+ /* Open the common datasets */
+ for(v = 0; v < ncommon; v++) {
+ unsigned offset; /* Offset of symbol to use */
+
+ /* Determine the offset of the symbol, within level 0 symbols */
+ /* (level 0 symbols are the most common symbols) */
+ offset = (unsigned)(HDrandom() % symbol_count[0]);
+ sym_com[v] = &symbol_info[0][offset];
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Common symbol #%u = '%s'\n", v, symbol_info[0][offset].name);
+ } /* end for */
+ } /* end if */
+
+ /* Allocate space for 'random' datasets, if any */
+ if(nrandom > 0) {
+ /* Allocate array to hold pointers to symbols for random datasets */
+ if(NULL == (sym_rand = (symbol_info_t **)HDmalloc(sizeof(symbol_info_t *) * nrandom)))
+ return -1;
+
+ /* Determine the random datasets */
+ for(v = 0; v < nrandom; v++) {
+ symbol_info_t *sym; /* Symbol to use */
+
+ /* Determine the symbol, within all symbols */
+ if(NULL == (sym = choose_dataset()))
+ return -1;
+ sym_rand[v] = sym;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Random symbol #%u = '%s'\n", v, sym->name);
+ } /* end for */
+ } /* end if */
+
+ /* Create a dataspace for the record to read */
+ if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Reading records\n");
+
+ /* Get the starting time */
+ start_time = HDtime(NULL);
+ curr_time = start_time;
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ /* Loop over reading records until [at least] the correct # of seconds have passed */
+ while(curr_time < (time_t)(start_time + (time_t)nseconds)) {
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Opening file: %s\n", filename);
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl)) < 0)
+ return -1;
+
+ /* Check 'common' datasets, if any */
+ if(ncommon > 0) {
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Checking common symbols\n");
+
+ /* Iterate over common datasets */
+ for(v = 0; v < ncommon; v++) {
+ /* Check common dataset */
+ if(check_dataset(fid, verbose, sym_com[v]->name, &record, mem_sid) < 0)
+ return -1;
+ HDmemset(&record, 0, sizeof(record));
+ } /* end for */
+ } /* end if */
+
+ /* Check 'random' datasets, if any */
+ if(nrandom > 0) {
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Checking random symbols\n");
+
+ /* Iterate over random datasets */
+ for(v = 0; v < nrandom; v++) {
+ /* Check random dataset */
+ if(check_dataset(fid, verbose, sym_rand[v]->name, &record, mem_sid) < 0)
+ return -1;
+ HDmemset(&record, 0, sizeof(record));
+ } /* end for */
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Closing file\n");
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ return -1;
+
+ /* Sleep for the appropriate # of seconds */
+ HDsleep(poll_time);
+
+ /* Retrieve the current time */
+ curr_time = HDtime(NULL);
+ } /* end while */
+
+ /* Close the fapl */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ /* Close the memory dataspace */
+ if(H5Sclose(mem_sid) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Closing datasets\n");
+
+ /* Close 'random' datasets, if any */
+ if(nrandom > 0) {
+ /* Release array holding dataset IDs for random datasets */
+ HDfree(sym_rand);
+ } /* end if */
+
+ /* Close 'common' datasets, if any */
+ if(ncommon > 0) {
+ /* Release array holding dataset IDs for common datasets */
+ HDfree(sym_com);
+ } /* end if */
+
+ return 0;
+} /* end read_records() */
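+
+/* The following is an illustrative sketch only (the EXAMPLE_SKETCH guard is
+ * never defined by the test suite).  It reduces the open/inspect/close
+ * polling cycle used by read_records() above to a single hypothetical
+ * dataset "/dset" in a hypothetical SWMR file "swmr_data.h5"; the real test
+ * uses FILENAME and the symbol tables from swmr_common.h instead.
+ */
+#ifdef EXAMPLE_SKETCH
+static int
+poll_one_dataset_sketch(unsigned long nseconds, unsigned poll_time)
+{
+    time_t start_time = HDtime(NULL);
+
+    while(HDtime(NULL) < (time_t)(start_time + (time_t)nseconds)) {
+        hid_t fid, dsid, sid;
+        hsize_t dims[2];
+
+        /* Re-open the file for SWMR reading on every polling pass */
+        if((fid = H5Fopen("swmr_data.h5", H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, H5P_DEFAULT)) < 0)
+            return -1;
+
+        /* Query the dataset's current extent (the writer may have grown it) */
+        if((dsid = H5Dopen2(fid, "/dset", H5P_DEFAULT)) < 0)
+            return -1;
+        if((sid = H5Dget_space(dsid)) < 0)
+            return -1;
+        if(H5Sget_simple_extent_dims(sid, dims, NULL) < 0)
+            return -1;
+        /* (A real reader would now read and validate records, as
+         *  check_dataset() does.) */
+
+        if(H5Sclose(sid) < 0 || H5Dclose(dsid) < 0 || H5Fclose(fid) < 0)
+            return -1;
+
+        /* Sleep before polling again */
+        HDsleep(poll_time);
+    } /* end while */
+
+    return 0;
+} /* end poll_one_dataset_sketch() */
+#endif /* EXAMPLE_SKETCH */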
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_remove_reader [-q] [-s <# of seconds to sleep between\n");
+ printf(" polling>] [-h <# of common symbols to poll>] [-l <# of random symbols\n");
+ printf(" to poll>] [-r <random seed>] <# of seconds to test>\n");
+ printf("\n");
+ printf("Defaults to verbose (no '-q' given), 1 second between polling ('-s 1'),\n");
+ printf("5 common symbols to poll ('-h 5'), 10 random symbols to poll ('-l 10'),\n");
+ printf("and will generate a random seed (no -r given).\n");
+ printf("\n");
+ HDexit(1);
+}
+
+int main(int argc, const char *argv[])
+{
+ long nseconds = 0; /* # of seconds to test */
+ int poll_time = 1; /* # of seconds between polling */
+ int ncommon = 5; /* # of common symbols to poll */
+ int nrandom = 10; /* # of random symbols to poll */
+ unsigned verbose = 1; /* Whether to emit some informational messages */
+ unsigned use_seed = 0; /* Set to 1 if a seed was set on the command line */
+ unsigned random_seed = 0; /* Random # seed */
+ unsigned u; /* Local index variables */
+ int temp;
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of common symbols to poll */
+ case 'h':
+ ncommon = HDatoi(argv[u + 1]);
+ if(ncommon < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* # of random symbols to poll */
+ case 'l':
+ nrandom = HDatoi(argv[u + 1]);
+ if(nrandom < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = 0;
+ u++;
+ break;
+
+ /* Random # seed */
+ case 'r':
+ use_seed = 1;
+ temp = HDatoi(argv[u + 1]);
+ if(temp < 0)
+ usage();
+ else
+ random_seed = (unsigned)temp;
+ u += 2;
+ break;
+
+ /* # of seconds between polling */
+ case 's':
+ poll_time = HDatoi(argv[u + 1]);
+ if(poll_time < 0)
+ usage();
+ u += 2;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of seconds to test */
+ nseconds = HDatol(argv[u]);
+ if(nseconds <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+ if(nseconds <= 0)
+ usage();
+ if(poll_time >= nseconds)
+ usage();
+
+ /* Emit informational message */
+ if(verbose) {
+ HDfprintf(stderr, "Parameters:\n");
+ HDfprintf(stderr, "\t# of seconds between polling = %d\n", poll_time);
+ HDfprintf(stderr, "\t# of common symbols to poll = %d\n", ncommon);
+ HDfprintf(stderr, "\t# of random symbols to poll = %d\n", nrandom);
+ HDfprintf(stderr, "\t# of seconds to test = %ld\n", nseconds);
+ } /* end if */
+
+ /* Set the random seed */
+ if(0 == use_seed) {
+ struct timeval t;
+ HDgettimeofday(&t, NULL);
+ random_seed = (unsigned)(t.tv_usec);
+ } /* end if */
+ HDsrandom(random_seed);
+ /* ALWAYS emit the random seed for possible debugging */
+ HDfprintf(stderr, "Using reader random seed: %u\n", random_seed);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0) {
+ HDfprintf(stderr, "Error generating symbol names!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Create datatype for creating datasets */
+ if((symbol_tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Reading records from datasets */
+ if(read_records(FILENAME, verbose, (unsigned long)nseconds, (unsigned)poll_time, (unsigned)ncommon, (unsigned)nrandom) < 0) {
+ HDfprintf(stderr, "Error reading records from datasets!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ HDfprintf(stderr, "Error releasing symbols!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Closing objects\n");
+
+ /* Close objects created */
+ if(H5Tclose(symbol_tid) < 0) {
+ HDfprintf(stderr, "Error closing symbol datatype!\n");
+ HDexit(1);
+ } /* end if */
+
+ return 0;
+}
diff --git a/test/swmr_remove_writer.c b/test/swmr_remove_writer.c
new file mode 100644
index 0000000..ddf7ede
--- /dev/null
+++ b/test/swmr_remove_writer.c
@@ -0,0 +1,381 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_remove_writer.c
+ *
+ * Purpose: Removes data from a randomly selected subset of the datasets
+ * in the SWMR test file.
+ *
+ * This program is intended to run concurrently with the
+ * swmr_remove_reader program. It is also run AFTER a sequential
+ * (not concurrent!) invocation of swmr_writer so the writer
+ * can dump a bunch of data into the datasets. Otherwise,
+ * there wouldn't be much to shrink :)
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include "h5test.h"
+#include "swmr_common.h"
+
+/****************/
+/* Local Macros */
+/****************/
+
+/* The maximum number of records to remove in one step */
+#define MAX_REMOVE_SIZE 10
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static hid_t open_skeleton(const char *filename, unsigned verbose, unsigned old);
+static int remove_records(hid_t fid, unsigned verbose, unsigned long nshrinks,
+ unsigned long flush_count);
+static void usage(void);
+
+
+/*-------------------------------------------------------------------------
+ * Function: open_skeleton
+ *
+ * Purpose: Opens the SWMR HDF5 file and datasets.
+ *
+ * Parameters: const char *filename
+ * The filename of the SWMR HDF5 file to open
+ *
+ * unsigned verbose
+ * Whether or not to emit verbose console messages
+ *
+ * unsigned old
+ * Whether to open the file without setting the latest file format
+ *
+ * Return: Success: The file ID of the opened SWMR file
+ * The dataset IDs are stored in a global array
+ *
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static hid_t
+open_skeleton(const char *filename, unsigned verbose, unsigned old)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t fapl; /* File access property list */
+ hid_t sid; /* Dataspace ID */
+ hsize_t dim[2]; /* Dataspace dimensions */
+ unsigned u, v; /* Local index variable */
+
+ HDassert(filename);
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ if(!old) {
+ /* Set to use the latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+ }
+
+#ifdef QAK
+/* Increase the initial size of the metadata cache */
+ {
+ H5AC_cache_config_t mdc_config;
+
+ mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ H5Pget_mdc_config(fapl, &mdc_config);
+ HDfprintf(stderr, "mdc_config.initial_size = %lu\n", (unsigned long)mdc_config.initial_size);
+ HDfprintf(stderr, "mdc_config.epoch_length = %lu\n", (unsigned long)mdc_config.epoch_length);
+ mdc_config.set_initial_size = 1;
+ mdc_config.initial_size = 16 * 1024 * 1024;
+ /* mdc_config.epoch_length = 5000; */
+ H5Pset_mdc_config(fapl, &mdc_config);
+ }
+#endif /* QAK */
+
+#ifdef QAK
+ H5Pset_fapl_log(fapl, "append.log", H5FD_LOG_ALL, (size_t)(512 * 1024 * 1024));
+#endif /* QAK */
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, fapl)) < 0)
+ return -1;
+
+ /* Close file access property list */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Opening datasets\n");
+
+ /* Open the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+ if((symbol_info[u][v].dsid = H5Dopen2(fid, symbol_info[u][v].name, H5P_DEFAULT)) < 0)
+ return -1;
+ if((sid = H5Dget_space(symbol_info[u][v].dsid)) < 0)
+ return -1;
+ if(2 != H5Sget_simple_extent_ndims(sid))
+ return -1;
+ if(H5Sget_simple_extent_dims(sid, dim, NULL) < 0)
+ return -1;
+ symbol_info[u][v].nrecords = dim[1];
+ } /* end for */
+
+ return fid;
+}
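+
+/* Illustrative sketch only (EXAMPLE_SKETCH is never defined): the bare
+ * property-list setup behind open_skeleton(), without the h5_fileaccess()
+ * test helper.  The file name "swmr_data.h5" is a placeholder; the real
+ * program opens FILENAME from swmr_common.h.
+ */
+#ifdef EXAMPLE_SKETCH
+static hid_t
+open_for_swmr_write_sketch(void)
+{
+    hid_t fapl, fid;
+
+    if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+        return -1;
+
+    /* SWMR writing normally requires the latest file format */
+    if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+        return -1;
+
+    /* Open the file read-write with the SWMR-write flag */
+    if((fid = H5Fopen("swmr_data.h5", H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, fapl)) < 0)
+        return -1;
+
+    if(H5Pclose(fapl) < 0)
+        return -1;
+
+    return fid;
+} /* end open_for_swmr_write_sketch() */
+#endif /* EXAMPLE_SKETCH */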
+
+
+/*-------------------------------------------------------------------------
+ * Function: remove_records
+ *
+ * Purpose: Removes a specified number of records from random datasets in
+ * the SWMR test file.
+ *
+ * Parameters: hid_t fid
+ * The file ID of the SWMR HDF5 file
+ *
+ * unsigned verbose
+ * Whether or not to emit verbose console messages
+ *
+ * unsigned long nshrinks
+ * # of records to remove from the datasets
+ *
+ * unsigned long flush_count
+ * # of records to write before flushing the file to disk
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+remove_records(hid_t fid, unsigned verbose, unsigned long nshrinks, unsigned long flush_count)
+{
+ unsigned long shrink_to_flush; /* # of removals before flush */
+ hsize_t dim[2] = {1,0}; /* Dataspace dimensions */
+ unsigned long u, v; /* Local index variables */
+
+ HDassert(fid >= 0);
+
+ /* Remove records from random datasets, according to frequency distribution */
+ shrink_to_flush = flush_count;
+ for(u = 0; u < nshrinks; u++) {
+ symbol_info_t *symbol; /* Symbol to remove record from */
+ hsize_t remove_size; /* Size to reduce dataset dimension by */
+
+ /* Get a random dataset, according to the symbol distribution */
+ symbol = choose_dataset();
+
+ /* Shrink the dataset's dataspace */
+ remove_size = (hsize_t)HDrandom() % MAX_REMOVE_SIZE + 1;
+ if(remove_size > symbol->nrecords)
+ symbol->nrecords = 0;
+ else
+ symbol->nrecords -= remove_size;
+ dim[1] = symbol->nrecords;
+ if(H5Dset_extent(symbol->dsid, dim) < 0)
+ return -1;
+
+ /* Check for flushing file */
+ if(flush_count > 0) {
+ /* Decrement count of records to write before flushing */
+ shrink_to_flush--;
+
+ /* Check for counter being reached */
+ if(0 == shrink_to_flush) {
+ /* Flush contents of file */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ return -1;
+
+ /* Reset flush counter */
+ shrink_to_flush = flush_count;
+ } /* end if */
+ } /* end if */
+ } /* end for */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Closing datasets\n");
+
+ /* Close the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++)
+ if(H5Dclose(symbol_info[u][v].dsid) < 0)
+ return -1;
+
+ return 0;
+}
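+
+/* Illustrative sketch only (EXAMPLE_SKETCH is never defined): the core of
+ * one shrink step in remove_records() above, for a single already-open
+ * 1 x N dataset.  The 'dsid' and 'nrecords' parameters stand in for the
+ * fields of a symbol_info_t entry.
+ */
+#ifdef EXAMPLE_SKETCH
+static int
+shrink_dataset_sketch(hid_t fid, hid_t dsid, hsize_t *nrecords, hsize_t remove_size)
+{
+    hsize_t dim[2] = {1, 0}; /* New dataspace dimensions */
+
+    /* Clamp the shrink so the extent never goes below zero */
+    if(remove_size > *nrecords)
+        *nrecords = 0;
+    else
+        *nrecords -= remove_size;
+
+    /* Shrink the dataset; SWMR readers will see the new, smaller extent */
+    dim[1] = *nrecords;
+    if(H5Dset_extent(dsid, dim) < 0)
+        return -1;
+
+    /* Flush so the metadata change reaches readers promptly */
+    if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+        return -1;
+
+    return 0;
+} /* end shrink_dataset_sketch() */
+#endif /* EXAMPLE_SKETCH */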
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_remove_writer [-q] [-o] [-f <# of shrinks between flushing\n");
+ printf(" file contents>] [-r <random seed>] <# of shrinks>\n");
+ printf("\n");
+ printf("<# of shrinks between flushing file contents> should be 0 (for no\n");
+ printf("flushing) or between 1 and (<# of shrinks> - 1)\n");
+ printf("\n");
+ printf("Defaults to verbose (no '-q' given), latest format when opening file (no '-o' given),\n");
+ printf("flushing every 1000 shrinks ('-f 1000'), and will generate a random seed (no -r given).\n");
+ printf("\n");
+ HDexit(1);
+}
+
+int main(int argc, const char *argv[])
+{
+ hid_t fid; /* File ID for file opened */
+ long nshrinks = 0; /* # of times to shrink the dataset */
+ long flush_count = 1000; /* # of records to write between flushing file */
+ unsigned verbose = 1; /* Whether to emit some informational messages */
+ unsigned old = 0; /* Whether to use non-latest-format when opening file */
+ unsigned use_seed = 0; /* Set to 1 if a seed was set on the command line */
+ unsigned random_seed = 0; /* Random # seed */
+ unsigned u; /* Local index variable */
+ int temp;
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of records to write between flushing file */
+ case 'f':
+ flush_count = HDatol(argv[u + 1]);
+ if(flush_count < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = 0;
+ u++;
+ break;
+
+ /* Random # seed */
+ case 'r':
+ use_seed = 1;
+ temp = HDatoi(argv[u + 1]);
+ random_seed = (unsigned)temp;
+ u += 2;
+ break;
+
+ /* Use non-latest-format when opening file */
+ case 'o':
+ old = 1;
+ u++;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of shrinks to perform */
+ nshrinks = HDatol(argv[u]);
+ if(nshrinks <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+ if(nshrinks <= 0)
+ usage();
+ if(flush_count >= nshrinks)
+ usage();
+
+ /* Emit informational message */
+ if(verbose) {
+ HDfprintf(stderr, "Parameters:\n");
+ HDfprintf(stderr, "\t# of shrinks between flushes = %ld\n", flush_count);
+ HDfprintf(stderr, "\t# of shrinks = %ld\n", nshrinks);
+ } /* end if */
+
+ /* Set the random seed */
+ if(0 == use_seed) {
+ struct timeval t;
+ HDgettimeofday(&t, NULL);
+ random_seed = (unsigned)(t.tv_usec);
+ } /* end if */
+ HDsrandom(random_seed);
+ /* ALWAYS emit the random seed for possible debugging */
+ HDfprintf(stderr, "Using writer random seed: %u\n", random_seed);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Opening skeleton file: %s\n", FILENAME);
+
+ /* Open file skeleton */
+ if((fid = open_skeleton(FILENAME, verbose, old)) < 0) {
+ HDfprintf(stderr, "Error opening skeleton file!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Send a message to indicate "H5Fopen" is complete--releasing the file lock */
+ h5_send_message(WRITER_MESSAGE, NULL, NULL);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Removing records\n");
+
+ /* Remove records from datasets */
+ if(remove_records(fid, verbose, (unsigned long)nshrinks, (unsigned long)flush_count) < 0) {
+ HDfprintf(stderr, "Error removing records from datasets!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ HDfprintf(stderr, "Error releasing symbols!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Closing objects\n");
+
+ /* Close objects opened */
+ if(H5Fclose(fid) < 0) {
+ HDfprintf(stderr, "Error closing file!\n");
+ HDexit(1);
+ } /* end if */
+
+ return 0;
+}
diff --git a/test/swmr_sparse_reader.c b/test/swmr_sparse_reader.c
new file mode 100644
index 0000000..f755cd2
--- /dev/null
+++ b/test/swmr_sparse_reader.c
@@ -0,0 +1,449 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_sparse_reader.c
+ *
+ * Purpose: Reads data from a randomly selected subset of the datasets
+ * in the SWMR test file. Unlike the regular reader, these
+ * datasets will be shrinking.
+ *
+ * This program is intended to run concurrently with the
+ * swmr_sparse_writer program.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include "h5test.h"
+#include "swmr_common.h"
+
+/****************/
+/* Local Macros */
+/****************/
+
+#define TIMEOUT 300
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+static hid_t symbol_tid = (-1);
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static int check_dataset(hid_t fid, unsigned verbose, const symbol_info_t *symbol,
+ symbol_t *record, hid_t rec_sid);
+static int read_records(const char *filename, unsigned verbose, unsigned long nrecords,
+ unsigned poll_time, unsigned reopen_count);
+static void usage(void);
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_dataset
+ *
+ * Purpose: Reads a randomly chosen record from the given dataset and
+ * verifies that its value matches its position. A mismatch
+ * indicates an inconsistent dataset due to a SWMR error.
+ *
+ * Parameters: hid_t fid
+ * The SWMR test file's ID.
+ *
+ * unsigned verbose
+ * Whether verbose console output is desired.
+ *
+ * const symbol_info_t *symbol
+ * The dataset from which to read (the ID is in the struct).
+ * Must be pre-allocated.
+ *
+ * symbol_t *record
+ * Memory for the record. Must be pre-allocated.
+ *
+ * hid_t rec_sid
+ * The memory dataspace for access. It's always the same so
+ * there is no need to re-create it every time this function
+ * is called.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+check_dataset(hid_t fid, unsigned verbose, const symbol_info_t *symbol, symbol_t *record,
+ hid_t rec_sid)
+{
+ hid_t dsid; /* Dataset ID */
+ hid_t file_sid; /* Dataset's space ID */
+ hsize_t start[2] = {0, 0}; /* Hyperslab selection values */
+ hsize_t count[2] = {1, 1}; /* Hyperslab selection values */
+
+ HDassert(fid >= 0);
+ HDassert(symbol);
+ HDassert(record);
+ HDassert(rec_sid >= 0);
+
+ /* Open dataset for symbol */
+ if((dsid = H5Dopen2(fid, symbol->name, H5P_DEFAULT)) < 0)
+ return -1;
+
+ /* Get the dataset's dataspace */
+ if((file_sid = H5Dget_space(dsid)) < 0)
+ return -1;
+
+ /* Choose the random record in the dataset (will be the same as chosen by
+ * the writer) */
+ start[1] = (hsize_t)HDrandom() % symbol->nrecords;
+ if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Symbol = '%s', location = %lld\n", symbol->name, (long long)start[1]);
+
+ /* Read record from dataset */
+ record->rec_id = (uint64_t)ULLONG_MAX;
+ if(H5Dread(dsid, symbol_tid, rec_sid, file_sid, H5P_DEFAULT, record) < 0)
+ return -1;
+
+ /* Verify record value */
+ if(record->rec_id != start[1]) {
+ HDfprintf(stderr, "*** ERROR ***\n");
+ HDfprintf(stderr, "Incorrect record value!\n");
+ HDfprintf(stderr, "Symbol = '%s', location = %lld, record->rec_id = %llu\n", symbol->name, (long long)start[1], (unsigned long long)record->rec_id);
+ return -1;
+ } /* end if */
+
+ /* Close the dataset's dataspace */
+ if(H5Sclose(file_sid) < 0)
+ return -1;
+
+ /* Close dataset for symbol */
+ if(H5Dclose(dsid) < 0)
+ return -1;
+
+ return 0;
+} /* end check_dataset() */
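+
+/* Illustrative sketch only (EXAMPLE_SKETCH is never defined): the access
+ * pattern used by check_dataset() above -- a scalar memory dataspace paired
+ * with a 1 x 1 hyperslab selection in the file -- reduced to a hypothetical
+ * 2-D dataset of native ints rather than symbol_t records.
+ */
+#ifdef EXAMPLE_SKETCH
+static int
+read_one_element_sketch(hid_t dsid, hsize_t row, hsize_t col, int *value_out)
+{
+    hid_t mem_sid, file_sid;
+    hsize_t start[2], count[2] = {1, 1};
+
+    /* Scalar memory dataspace: the buffer holds exactly one element */
+    if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
+        return -1;
+
+    /* Select the single element to read from the file */
+    if((file_sid = H5Dget_space(dsid)) < 0)
+        return -1;
+    start[0] = row;
+    start[1] = col;
+    if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+        return -1;
+
+    /* Read the element into the caller's buffer */
+    if(H5Dread(dsid, H5T_NATIVE_INT, mem_sid, file_sid, H5P_DEFAULT, value_out) < 0)
+        return -1;
+
+    if(H5Sclose(file_sid) < 0 || H5Sclose(mem_sid) < 0)
+        return -1;
+
+    return 0;
+} /* end read_one_element_sketch() */
+#endif /* EXAMPLE_SKETCH */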
+
+
+/*-------------------------------------------------------------------------
+ * Function: read_records
+ *
+ * Purpose: Reads a series of records from randomly selected datasets
+ * in the SWMR test file, waiting on each dataset's "seq"
+ * attribute to stay synchronized with the sparse writer.
+ *
+ * Parameters: const char *filename
+ * The SWMR test file's name.
+ *
+ * unsigned verbose
+ * Whether verbose console output is desired.
+ *
+ * unsigned long nrecords
+ * The total number of records to read.
+ *
+ * unsigned poll_time
+ * The amount of time to sleep (s).
+ *
+ * unsigned reopen_count
+ * The # of reads to perform between reopening the file.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+read_records(const char *filename, unsigned verbose, unsigned long nrecords,
+ unsigned poll_time, unsigned reopen_count)
+{
+ hid_t fid; /* File ID */
+ hid_t aid; /* Attribute ID */
+ time_t start_time; /* Starting time */
+ hid_t mem_sid; /* Memory dataspace ID */
+ symbol_t record; /* The record to add to the dataset */
+ unsigned seed; /* Seed for random number generator */
+ unsigned iter_to_reopen = reopen_count; /* # of iterations until reopen */
+ unsigned long u; /* Local index variable */
+ hid_t fapl;
+
+ HDassert(filename);
+ HDassert(poll_time != 0);
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ H5Pset_fclose_degree(fapl, H5F_CLOSE_SEMI);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Opening file: %s\n", filename);
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl)) < 0)
+ return -1;
+
+ /* Seed the random number generator with the attribute in the file */
+ if((aid = H5Aopen(fid, "seed", H5P_DEFAULT)) < 0)
+ return -1;
+ if(H5Aread(aid, H5T_NATIVE_UINT, &seed) < 0)
+ return -1;
+ if(H5Aclose(aid) < 0)
+ return -1;
+ HDsrandom(seed);
+
+ /* Reset the record */
+ /* (record's 'info' field might need to change for each record written, also) */
+ HDmemset(&record, 0, sizeof(record));
+
+ /* Create a dataspace for the record to read */
+ if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Reading records\n");
+
+ /* Get the starting time */
+ start_time = HDtime(NULL);
+
+ /* Read records */
+ for(u = 0; u < nrecords; u++) {
+ symbol_info_t *symbol = NULL; /* Symbol (dataset) */
+ htri_t attr_exists; /* Whether the sequence number attribute exists */
+ unsigned long file_u; /* Attribute sequence number (writer's "u") */
+
+ /* Get a random dataset, according to the symbol distribution */
+ symbol = choose_dataset();
+
+ /* Fill in "nrecords" field. Note that this depends on the writer
+ * using the same algorithm and "nrecords" */
+ symbol->nrecords = nrecords / 5;
+
+ /* Wait until we can read the dataset */
+ do {
+ /* Check if sequence attribute exists */
+ if((attr_exists = H5Aexists_by_name(fid, symbol->name, "seq", H5P_DEFAULT)) < 0)
+ return -1;
+
+ if(attr_exists) {
+ /* Read sequence number attribute */
+ if((aid = H5Aopen_by_name(fid, symbol->name, "seq", H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ return -1;
+ if(H5Aread(aid, H5T_NATIVE_ULONG, &file_u) < 0)
+ return -1;
+ if(H5Aclose(aid) < 0)
+ return -1;
+
+ /* Check if sequence number is at least u - if so, this should
+ * guarantee that this record has been written */
+ if(file_u >= u)
+ break;
+ } /* end if */
+
+ /* Check for timeout */
+ if(HDtime(NULL) >= (time_t)(start_time + (time_t)TIMEOUT)) {
+ HDfprintf(stderr, "Reader timed out\n");
+ return -1;
+ } /* end if */
+
+ /* Pause */
+ HDsleep(poll_time);
+
+ /* Retrieve and print the collection of metadata read retries */
+ if(print_metadata_retries_info(fid) < 0)
+ HDfprintf(stderr, "Warning: could not obtain metadata retries info\n");
+
+ /* Reopen the file */
+ if(H5Fclose(fid) < 0)
+ return -1;
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl)) < 0)
+ return -1;
+ iter_to_reopen = reopen_count;
+ } while(1);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Checking dataset %lu\n", u);
+
+ /* Check dataset */
+ if(check_dataset(fid, verbose, symbol, &record, mem_sid) < 0)
+ return -1;
+ HDmemset(&record, 0, sizeof(record));
+
+ /* Check for reopen */
+ iter_to_reopen--;
+ if(iter_to_reopen == 0) {
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Reopening file: %s\n", filename);
+
+ /* Retrieve and print the collection of metadata read retries */
+ if(print_metadata_retries_info(fid) < 0)
+ HDfprintf(stderr, "Warning: could not obtain metadata retries info\n");
+
+ /* Reopen the file */
+ if(H5Fclose(fid) < 0)
+ return -1;
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl)) < 0)
+ return -1;
+ iter_to_reopen = reopen_count;
+ } /* end if */
+ } /* end for */
+
+ /* Retrieve and print the collection of metadata read retries */
+ if(print_metadata_retries_info(fid) < 0)
+ HDfprintf(stderr, "Warning: could not obtain metadata retries info\n");
+
+ /* Close file */
+ if(H5Fclose(fid) < 0)
+ return -1;
+
+ /* Close the memory dataspace */
+ if(H5Sclose(mem_sid) < 0)
+ return -1;
+
+ return 0;
+} /* end read_records() */
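+
+/* Illustrative sketch only (EXAMPLE_SKETCH is never defined): the
+ * attribute-based handshake used in read_records() above, reduced to one
+ * dataset.  The reader polls the writer's "seq" attribute until it reaches
+ * 'target' or the timeout expires; 'dset_name' is whatever dataset both
+ * sides agreed upon.
+ */
+#ifdef EXAMPLE_SKETCH
+static int
+wait_for_sequence_sketch(hid_t fid, const char *dset_name, unsigned long target,
+    unsigned poll_time, unsigned timeout)
+{
+    time_t start_time = HDtime(NULL);
+
+    while(HDtime(NULL) < (time_t)(start_time + (time_t)timeout)) {
+        htri_t attr_exists;
+        unsigned long seq;
+        hid_t aid;
+
+        /* The writer creates "seq" the first time it touches the dataset */
+        if((attr_exists = H5Aexists_by_name(fid, dset_name, "seq", H5P_DEFAULT)) < 0)
+            return -1;
+
+        if(attr_exists) {
+            if((aid = H5Aopen_by_name(fid, dset_name, "seq", H5P_DEFAULT, H5P_DEFAULT)) < 0)
+                return -1;
+            if(H5Aread(aid, H5T_NATIVE_ULONG, &seq) < 0)
+                return -1;
+            if(H5Aclose(aid) < 0)
+                return -1;
+
+            /* The sequence number has caught up, so the record is visible */
+            if(seq >= target)
+                return 0;
+        } /* end if */
+
+        HDsleep(poll_time);
+    } /* end while */
+
+    return -1; /* Timed out waiting for the writer */
+} /* end wait_for_sequence_sketch() */
+#endif /* EXAMPLE_SKETCH */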
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_sparse_reader [-q] [-s <# of seconds to wait for writer>]\n");
+ printf(" [-n <# of reads between reopens>] <# of records>\n");
+ printf("\n");
+ printf("Defaults to verbose (no '-q' given), 1 second wait ('-s 1') and 1 read\n");
+ printf("between reopens ('-n 1')\n");
+ printf("\n");
+ printf("Note that the # of records *must* be the same as that supplied to\n");
+ printf("swmr_sparse_writer\n");
+ printf("\n");
+ HDexit(1);
+} /* end usage() */
+
+int main(int argc, const char *argv[])
+{
+ long nrecords = 0; /* # of records to read */
+ int poll_time = 1; /* # of seconds to sleep when waiting for writer */
+ int reopen_count = 1; /* # of reads between reopens */
+ unsigned verbose = 1; /* Whether to emit some informational messages */
+ unsigned u; /* Local index variables */
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of reads between reopens */
+ case 'n':
+ reopen_count = HDatoi(argv[u + 1]);
+ if(reopen_count < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = 0;
+ u++;
+ break;
+
+ /* # of seconds between polling */
+ case 's':
+ poll_time = HDatoi(argv[u + 1]);
+ if(poll_time < 0)
+ usage();
+ u += 2;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of records to read */
+ nrecords = HDatol(argv[u]);
+ if(nrecords <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose) {
+ HDfprintf(stderr, "Parameters:\n");
+ HDfprintf(stderr, "\t# of seconds between polling = %d\n", poll_time);
+ HDfprintf(stderr, "\t# of reads between reopens = %d\n", reopen_count);
+ HDfprintf(stderr, "\t# of records to read = %ld\n", nrecords);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0) {
+ HDfprintf(stderr, "Error generating symbol names!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Create datatype for creating datasets */
+ if((symbol_tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Reading records from datasets */
+ if(read_records(FILENAME, verbose, (unsigned long) nrecords, (unsigned)poll_time, (unsigned)reopen_count) < 0) {
+ HDfprintf(stderr, "Error reading records from datasets!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ HDfprintf(stderr, "Error releasing symbols!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Closing objects\n");
+
+ /* Close objects created */
+ if(H5Tclose(symbol_tid) < 0) {
+ HDfprintf(stderr, "Error closing symbol datatype!\n");
+ HDexit(1);
+ } /* end if */
+
+ return 0;
+}
diff --git a/test/swmr_sparse_writer.c b/test/swmr_sparse_writer.c
new file mode 100644
index 0000000..13b21c2
--- /dev/null
+++ b/test/swmr_sparse_writer.c
@@ -0,0 +1,454 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+ /*-------------------------------------------------------------------------
+ *
+ * Created: swmr_sparse_writer.c
+ *
+ * Purpose: Writes data to a randomly selected subset of the datasets
+ * in the SWMR test file.
+ *
+ * This program is intended to run concurrently with the
+ * swmr_sparse_reader program.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include "h5test.h"
+#include "swmr_common.h"
+
+/****************/
+/* Local Macros */
+/****************/
+
+#ifdef OUT
+#define BUSY_WAIT 100000
+#endif /* OUT */
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static hid_t open_skeleton(const char *filename, unsigned verbose);
+static int add_records(hid_t fid, unsigned verbose, unsigned long nrecords,
+ unsigned long flush_count);
+static void usage(void);
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: open_skeleton
+ *
+ * Purpose: Opens the SWMR HDF5 file and datasets.
+ *
+ * Parameters: const char *filename
+ * The filename of the SWMR HDF5 file to open
+ *
+ * unsigned verbose
+ * Whether or not to emit verbose console messages
+ *
+ * Return: Success: The file ID of the opened SWMR file
+ * The dataset IDs are stored in a global array
+ *
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static hid_t
+open_skeleton(const char *filename, unsigned verbose)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t fapl; /* File access property list */
+ hid_t aid; /* Attribute ID */
+ unsigned seed; /* Seed for random number generator */
+ unsigned u, v; /* Local index variable */
+
+ HDassert(filename);
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ /* Set to use the latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+
+#ifdef QAK
+ /* Increase the initial size of the metadata cache */
+ {
+ H5AC_cache_config_t mdc_config;
+
+ mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ H5Pget_mdc_config(fapl, &mdc_config);
+ HDfprintf(stderr, "mdc_config.initial_size = %lu\n", (unsigned long)mdc_config.initial_size);
+ HDfprintf(stderr,"mdc_config.epoch_length = %lu\n", (unsigned long)mdc_config.epoch_length);
+ mdc_config.set_initial_size = 1;
+ mdc_config.initial_size = 16 * 1024 * 1024;
+ /* mdc_config.epoch_length = 5000; */
+ H5Pset_mdc_config(fapl, &mdc_config);
+ }
+#endif /* QAK */
+
+#ifdef QAK
+ H5Pset_fapl_log(fapl, "append.log", H5FD_LOG_ALL, (size_t)(512 * 1024 * 1024));
+#endif /* QAK */
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, fapl)) < 0)
+ return -1;
+
+ /* Close file access property list */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Opening datasets\n");
+
+ /* Seed the random number generator with the attribute in the file */
+ if((aid = H5Aopen(fid, "seed", H5P_DEFAULT)) < 0)
+ return -1;
+ if(H5Aread(aid, H5T_NATIVE_UINT, &seed) < 0)
+ return -1;
+ if(H5Aclose(aid) < 0)
+ return -1;
+ HDsrandom(seed);
+
+ /* Open the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+ if((symbol_info[u][v].dsid = H5Dopen2(fid, symbol_info[u][v].name, H5P_DEFAULT)) < 0)
+ return(-1);
+ symbol_info[u][v].nrecords = 0;
+ } /* end for */
+
+ return fid;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: add_records
+ *
+ * Purpose: Writes a specified number of records to random datasets in
+ * the SWMR test file.
+ *
+ * Parameters: hid_t fid
+ * The file ID of the SWMR HDF5 file
+ *
+ * unsigned verbose
+ * Whether or not to emit verbose console messages
+ *
+ * unsigned long nrecords
+ * # of records to write to the datasets
+ *
+ * unsigned long flush_count
+ * # of records to write before flushing the file to disk
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+add_records(hid_t fid, unsigned verbose, unsigned long nrecords, unsigned long flush_count)
+{
+ hid_t tid; /* Datatype ID for records */
+ hid_t mem_sid; /* Memory dataspace ID */
+ hsize_t start[2] = {0, 0}; /* Hyperslab selection values */
+ hsize_t count[2] = {1, 1}; /* Hyperslab selection values */
+ symbol_t record; /* The record to add to the dataset */
+ unsigned long rec_to_flush; /* # of records left to write before flush */
+#ifdef OUT
+ volatile int dummy; /* Dummy variable for busy wait */
+#endif /* OUT */
+ hsize_t dim[2] = {1,0}; /* Dataspace dimensions */
+ unsigned long u, v; /* Local index variables */
+
+ HDassert(fid >= 0);
+
+ /* Reset the record */
+ /* (record's 'info' field might need to change for each record written, also) */
+ HDmemset(&record, 0, sizeof(record));
+
+ /* Create a dataspace for the record to add */
+ if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+
+ /* Create datatype for appending records */
+ if((tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Add records to random datasets, according to frequency distribution */
+ rec_to_flush = flush_count;
+ for(u = 0; u < nrecords; u++) {
+ symbol_info_t *symbol; /* Symbol to write record to */
+ hid_t file_sid; /* Dataset's space ID */
+ hid_t aid; /* Attribute ID */
+ hbool_t corked; /* Whether the dataset was corked */
+
+ /* Get a random dataset, according to the symbol distribution */
+ symbol = choose_dataset();
+
+ /* If this is the first time the dataset has been opened, extend it and
+ * add the sequence attribute */
+ if(symbol->nrecords == 0) {
+ symbol->nrecords = nrecords / 5;
+ dim[1] = symbol->nrecords;
+
+ /* Cork the metadata cache, to prevent the object header from being
+ * flushed before the data has been written */
+ if(H5Odisable_mdc_flushes(symbol->dsid) < 0)
+ return -1;
+ corked = TRUE;
+
+ if(H5Dset_extent(symbol->dsid, dim) < 0)
+ return -1;
+
+ if((file_sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+ if((aid = H5Acreate2(symbol->dsid, "seq", H5T_NATIVE_ULONG, file_sid, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ return -1;
+ if(H5Sclose(file_sid) < 0)
+ return -1;
+ } /* end if */
+ else {
+ if((aid = H5Aopen(symbol->dsid, "seq", H5P_DEFAULT)) < 0)
+ return -1;
+ corked = FALSE;
+ } /* end else */
+
+ /* Get the coordinate to write */
+ start[1] = (hsize_t)HDrandom() % symbol->nrecords;
+
+ /* Set the record's ID (equal to its position) */
+ record.rec_id = start[1];
+
+ /* Get the dataset's dataspace */
+ if((file_sid = H5Dget_space(symbol->dsid)) < 0)
+ return -1;
+
+ /* Choose a random record in the dataset */
+ if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ return -1;
+
+ /* Write record to the dataset */
+ if(H5Dwrite(symbol->dsid, tid, mem_sid, file_sid, H5P_DEFAULT, &record) < 0)
+ return -1;
+
+ /* Write the sequence number attribute. Since we synchronize the random
+ * number seed, the readers will always generate the same sequence of
+ * randomly chosen datasets and offsets. Therefore, and because of the
+ * flush dependencies on the object header, the reader will be
+ * guaranteed to see the written data if the sequence attribute is >=u.
+ */
+ if(H5Awrite(aid, H5T_NATIVE_ULONG, &u) < 0)
+ return -1;
+
+ /* Close the attribute */
+ if(H5Aclose(aid) < 0)
+ return -1;
+
+ /* Uncork the metadata cache, if it was corked above */
+ if(corked)
+ if(H5Oenable_mdc_flushes(symbol->dsid) < 0)
+ return -1;
+
+ /* Close the dataset's dataspace */
+ if(H5Sclose(file_sid) < 0)
+ return -1;
+
+ /* Check for flushing file */
+ if(flush_count > 0) {
+ /* Decrement count of records to write before flushing */
+ rec_to_flush--;
+
+ /* Check for counter being reached */
+ if(0 == rec_to_flush) {
+ /* Flush contents of file */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ return -1;
+
+ /* Reset flush counter */
+ rec_to_flush = flush_count;
+ } /* end if */
+ } /* end if */
+
+#ifdef OUT
+ /* Busy wait, to let readers catch up */
+ /* If this is removed, also remove the BUSY_WAIT symbol
+ * at the top of the file.
+ */
+ dummy = 0;
+ for(v=0; v<BUSY_WAIT; v++)
+ dummy++;
+ if((unsigned long)dummy != v)
+ return -1;
+#endif /* OUT */
+
+ } /* end for */
+
+ /* Close the memory dataspace */
+ if(H5Sclose(mem_sid) < 0)
+ return -1;
+
+ /* Close the datatype */
+ if(H5Tclose(tid) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Closing datasets\n");
+
+ /* Close the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++)
+ if(H5Dclose(symbol_info[u][v].dsid) < 0)
+ return -1;
+
+ return 0;
+}
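+
+/* Illustrative sketch only (EXAMPLE_SKETCH is never defined): the
+ * cork/extend/write/uncork pattern used in add_records() above, for one
+ * hypothetical 1 x N dataset of native ints.  Corking keeps the object
+ * header from reaching the file before the raw data does, so a SWMR reader
+ * never sees an extent that is ahead of the data.
+ */
+#ifdef EXAMPLE_SKETCH
+static int
+append_one_int_sketch(hid_t dsid, hsize_t *nrecords, int value)
+{
+    hid_t mem_sid, file_sid;
+    hsize_t dim[2] = {1, 0};
+    hsize_t start[2] = {0, 0}, count[2] = {1, 1};
+
+    /* Cork the dataset's metadata until the data is on disk */
+    if(H5Odisable_mdc_flushes(dsid) < 0)
+        return -1;
+
+    /* Extend the dataset by one record and select the new element */
+    start[1] = *nrecords;
+    (*nrecords)++;
+    dim[1] = *nrecords;
+    if(H5Dset_extent(dsid, dim) < 0)
+        return -1;
+    if((file_sid = H5Dget_space(dsid)) < 0)
+        return -1;
+    if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+        return -1;
+
+    /* Write the new element through a scalar memory dataspace */
+    if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
+        return -1;
+    if(H5Dwrite(dsid, H5T_NATIVE_INT, mem_sid, file_sid, H5P_DEFAULT, &value) < 0)
+        return -1;
+
+    /* Uncork now that the data has been written */
+    if(H5Oenable_mdc_flushes(dsid) < 0)
+        return -1;
+
+    if(H5Sclose(file_sid) < 0 || H5Sclose(mem_sid) < 0)
+        return -1;
+
+    return 0;
+} /* end append_one_int_sketch() */
+#endif /* EXAMPLE_SKETCH */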
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_sparse_writer [-q] [-f <# of records to write between\n");
+ printf(" flushing file contents>] <# of records>\n");
+ printf("\n");
+ printf("<# of records to write between flushing file contents> should be 0\n");
+ printf("(for no flushing) or between 1 and (<# of records> - 1)\n");
+ printf("\n");
+ printf("Defaults to verbose (no '-q' given) and flushing every 1000 records\n");
+ printf("('-f 1000')\n");
+ printf("\n");
+ HDexit(1);
+}
+
+int main(int argc, const char *argv[])
+{
+ hid_t fid; /* File ID for file opened */
+ long nrecords = 0; /* # of records to append */
+ long flush_count = 1000; /* # of records to write between flushing file */
+ unsigned verbose = 1; /* Whether to emit some informational messages */
+ unsigned u; /* Local index variable */
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of records to write between flushing file */
+ case 'f':
+ flush_count = HDatol(argv[u + 1]);
+ if(flush_count < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = 0;
+ u++;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of records to append */
+ nrecords = HDatol(argv[u]);
+ if(nrecords <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+ if(nrecords <= 0)
+ usage();
+ if(flush_count >= nrecords)
+ usage();
+
+ /* Emit informational message */
+ if(verbose) {
+ HDfprintf(stderr, "Parameters:\n");
+ HDfprintf(stderr, "\t# of records between flushes = %ld\n", flush_count);
+ HDfprintf(stderr, "\t# of records to write = %ld\n", nrecords);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Opening skeleton file: %s\n", FILENAME);
+
+ /* Open file skeleton */
+ if((fid = open_skeleton(FILENAME, verbose)) < 0) {
+ HDfprintf(stderr, "Error opening skeleton file!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Send a message to indicate "H5Fopen" is complete--releasing the file lock */
+ h5_send_message(WRITER_MESSAGE, NULL, NULL);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Adding records\n");
+
+ /* Append records to datasets */
+ if(add_records(fid, verbose, (unsigned long)nrecords, (unsigned long)flush_count) < 0) {
+ HDfprintf(stderr, "Error appending records to datasets!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ HDfprintf(stderr, "Error releasing symbols!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Closing objects\n");
+
+ /* Close objects opened */
+ if(H5Fclose(fid) < 0) {
+ HDfprintf(stderr, "Error closing file!\n");
+ HDexit(1);
+ } /* end if */
+
+ return 0;
+}
diff --git a/test/swmr_start_write.c b/test/swmr_start_write.c
new file mode 100644
index 0000000..26a3dab
--- /dev/null
+++ b/test/swmr_start_write.c
@@ -0,0 +1,713 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_start_write.c
+ *
+ * Purpose: This program enables SWMR writing mode via H5Fstart_swmr_write().
+ * It writes data to a randomly selected subset of the datasets
+ * in the SWMR test file; and it is intended to run concurrently
+ * with the swmr_reader program.
+ *
+ * NOTE: The routines in this program are basically copied and modified from
+ * swmr*.c.
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include "h5test.h"
+#include "swmr_common.h"
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static hid_t create_file(const char *filename, hbool_t verbose,
+ FILE *verbose_file, unsigned random_seed);
+static int create_datasets(hid_t fid, int comp_level, hbool_t verbose,
+ FILE *verbose_file, const char *index_type);
+static int create_close_datasets(hid_t fid, int comp_level, hbool_t verbose,
+ FILE *verbose_file);
+static int open_datasets(hid_t fid, hbool_t verbose, FILE *verbose_file);
+static hid_t open_file(const char *filename, hbool_t verbose,
+ FILE *verbose_file);
+static int add_records(hid_t fid, hbool_t verbose, FILE *verbose_file,
+ unsigned long nrecords, unsigned long flush_count);
+static void usage(void);
+
+#define CHUNK_SIZE 50 /* Chunk size for created datasets */
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_file
+ *
+ * Purpose: Creates the HDF5 file (without SWMR access) which
+ * will be used for testing H5Fstart_swmr_write().
+ *
+ * Parameters:
+ * filename: The SWMR test file's name.
+ * verbose: whether verbose console output is desired.
+ * verbose_file: file pointer for verbose output
+ * random_seed: The random seed to store in the file.
+ * The sparse tests use this value.
+ *
+ * Return: Success: the file ID
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static hid_t
+create_file(const char *filename, hbool_t verbose, FILE *verbose_file,
+ unsigned random_seed)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t fcpl; /* File creation property list */
+ hid_t fapl; /* File access property list */
+ hid_t sid; /* Dataspace ID */
+ hid_t aid; /* Attribute ID */
+
+ HDassert(filename);
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ /* We ALWAYS select the latest file format for SWMR */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+
+ if(verbose) {
+ char verbose_name[1024];
+
+ HDsnprintf(verbose_name, sizeof(verbose_name), "swmr_start_write.log.%u", random_seed);
+
+ H5Pset_fapl_log(fapl, verbose_name, H5FD_LOG_ALL, (size_t)(512 * 1024 * 1024));
+ } /* end if */
+
+ /* Create file creation property list */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Creating file without SWMR access\n");
+
+ /* Create the file */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
+ return -1;
+
+ /* Close file creation property list */
+ if(H5Pclose(fcpl) < 0)
+ return -1;
+
+ /* Close file access property list */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ /* Create attribute with (shared) random number seed - for sparse test */
+ if((sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+ if((aid = H5Acreate2(fid, "seed", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ return -1;
+ if(H5Awrite(aid, H5T_NATIVE_UINT, &random_seed) < 0)
+ return -1;
+ if(H5Sclose(sid) < 0)
+ return -1;
+ if(H5Aclose(aid) < 0)
+ return -1;
+
+ return fid;
+} /* end create_file() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_datasets
+ *
+ * Purpose: Create datasets (and keep them opened) which will be used for testing
+ * H5Fstart_swmr_write().
+ *
+ * Parameters:
+ * fid: file ID for the SWMR test file
+ * comp_level: the compression level
+ * index_type: The chunk index type (b1 | b2 | ea | fa)
+ * verbose: whether verbose console output is desired.
+ * verbose_file: file pointer for verbose output
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+create_datasets(hid_t fid, int comp_level, hbool_t verbose, FILE *verbose_file,
+ const char *index_type)
+{
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t tid; /* Datatype for dataset elements */
+ hid_t sid; /* Dataspace ID */
+ hsize_t dims[2] = {1, 0}; /* Dataset starting dimensions */
+ hsize_t max_dims[2] = {1, H5S_UNLIMITED}; /* Dataset maximum dimensions */
+ hsize_t chunk_dims[2] = {1, CHUNK_SIZE}; /* Chunk dimensions */
+ unsigned u, v; /* Local index variable */
+
+ HDassert(index_type);
+
+ /* Create datatype for creating datasets */
+ if((tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* There are two chunk indexes tested here.
+ * With one unlimited dimension, we get the extensible array index
+ * type; with two unlimited dimensions, we get a v-2 B-tree.
+ */
+ if(!HDstrcmp(index_type, "b2"))
+ max_dims[0] = H5S_UNLIMITED;
+
+ /* Create dataspace for creating datasets */
+ if((sid = H5Screate_simple(2, dims, max_dims)) < 0)
+ return -1;
+
+ /* Create dataset creation property list */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ return -1;
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+ return -1;
+ if(comp_level >= 0) {
+ if(H5Pset_deflate(dcpl, (unsigned)comp_level) < 0)
+ return -1;
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Creating datasets\n");
+
+ /* Create the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+
+ if((symbol_info[u][v].dsid = H5Dcreate2(fid, symbol_info[u][v].name, tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ return -1;
+ symbol_info[u][v].nrecords = 0;
+
+ } /* end for */
+
+ return 0;
+} /* create_datasets() */
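+
+/* Illustrative sketch only (EXAMPLE_SKETCH is never defined): how the chunk
+ * index type is selected in create_datasets() above.  With the latest file
+ * format, a chunked dataset with one unlimited dimension uses an
+ * extensible-array index, while two unlimited dimensions yield a version-2
+ * B-tree.  The element type is simplified to a native int here; 'fid' is
+ * assumed to have been created with latest library version bounds.
+ */
+#ifdef EXAMPLE_SKETCH
+static hid_t
+create_indexed_dataset_sketch(hid_t fid, const char *name, hbool_t two_unlimited)
+{
+    hid_t dcpl, sid, dsid;
+    hsize_t dims[2] = {1, 0};
+    hsize_t max_dims[2] = {1, H5S_UNLIMITED};
+    hsize_t chunk_dims[2] = {1, CHUNK_SIZE};
+
+    /* A second unlimited dimension switches the chunk index to a v-2 B-tree */
+    if(two_unlimited)
+        max_dims[0] = H5S_UNLIMITED;
+
+    if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+        return -1;
+    if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+        return -1;
+    if((sid = H5Screate_simple(2, dims, max_dims)) < 0)
+        return -1;
+
+    /* Create the chunked, extendible dataset */
+    if((dsid = H5Dcreate2(fid, name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+        return -1;
+
+    if(H5Sclose(sid) < 0 || H5Pclose(dcpl) < 0)
+        return -1;
+
+    return dsid;
+} /* end create_indexed_dataset_sketch() */
+#endif /* EXAMPLE_SKETCH */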
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_close_datasets
+ *
+ * Purpose: Create and close datasets which will be used for testing
+ * H5Fstart_swmr_write().
+ *
+ * Parameters:
+ * fid: file ID for the SWMR test file
+ * comp_level: the compression level
+ * verbose: whether verbose console output is desired.
+ * verbose_file: file pointer for verbose output
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+create_close_datasets(hid_t fid, int comp_level, hbool_t verbose, FILE *verbose_file)
+{
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t tid; /* Datatype for dataset elements */
+ hid_t sid; /* Dataspace ID */
+ hsize_t dims[2] = {1, 0}; /* Dataset starting dimensions */
+ hsize_t max_dims[2] = {1, H5S_UNLIMITED}; /* Dataset maximum dimensions */
+ hsize_t chunk_dims[2] = {1, CHUNK_SIZE}; /* Chunk dimensions */
+ unsigned u, v; /* Local index variable */
+
+ /* Create datatype for creating datasets */
+ if((tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Create dataspace for creating datasets */
+ if((sid = H5Screate_simple(2, dims, max_dims)) < 0)
+ return -1;
+
+ /* Create dataset creation property list */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ return -1;
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+ return -1;
+ if(comp_level >= 0) {
+ if(H5Pset_deflate(dcpl, (unsigned)comp_level) < 0)
+ return -1;
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Creating datasets\n");
+
+ /* Create the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+ hid_t dsid; /* Dataset ID */
+ char name_buf[64];
+
+ generate_name(name_buf, u, v);
+ if((dsid = H5Dcreate2(fid, name_buf, tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ return -1;
+
+ if(H5Dclose(dsid) < 0)
+ return -1;
+ } /* end for */
+
+ /* Closing */
+ if(H5Pclose(dcpl) < 0)
+ return -1;
+ if(H5Sclose(sid) < 0)
+ return -1;
+ if(H5Tclose(tid) < 0)
+ return -1;
+
+ return 0;
+} /* create_close_datasets() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: open_file
+ *
+ * Purpose: Opens the HDF5 test file without SWMR access.
+ *
+ * Parameters:
+ * filename: The filename of the HDF5 file to open
+ * verbose: whether or not to emit verbose console messages
+ * verbose_file: file pointer for verbose output
+ *
+ * Return: Success: The file ID of the opened SWMR file
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static hid_t
+open_file(const char *filename, hbool_t verbose, FILE *verbose_file)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t fapl; /* File access property list */
+
+ HDassert(filename);
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ /* Set to use the latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Opening the file without SWMR access: %s\n", filename);
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
+ return -1;
+
+ /* Close file access property list */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ return fid;
+} /* end open_file() */
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: open_datasets
+ *
+ * Purpose: Opens the datasets.
+ *
+ * Parameters:
+ * fid: the file ID of the SWMR HDF5 file
+ * verbose: whether or not to emit verbose console messages
+ * verbose_file: file pointer for verbose output
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+open_datasets(hid_t fid, hbool_t verbose, FILE *verbose_file)
+{
+ unsigned u, v; /* Local index variable */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Opening datasets\n");
+
+ /* Open the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+ if((symbol_info[u][v].dsid = H5Dopen2(fid, symbol_info[u][v].name, H5P_DEFAULT)) < 0)
+ return -1;
+ symbol_info[u][v].nrecords = 0;
+ } /* end for */
+
+ return 0;
+} /* open_datasets() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: add_records
+ *
+ * Purpose: Writes a specified number of records to random datasets in
+ * the SWMR test file.
+ *
+ * Parameters:
+ * fid: The file ID of the SWMR HDF5 file
+ * verbose: Whether or not to emit verbose console messages
+ * verbose_file: file pointer for verbose output
+ * nrecords: # of records to write to the datasets
+ * flush_count: # of records to write before flushing the file to disk
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+add_records(hid_t fid, hbool_t verbose, FILE *verbose_file,
+ unsigned long nrecords, unsigned long flush_count)
+{
+ hid_t tid; /* Datatype ID for records */
+ hid_t mem_sid; /* Memory dataspace ID */
+ hsize_t start[2] = {0, 0}, count[2] = {1, 1}; /* Hyperslab selection values */
+ hsize_t dim[2] = {1, 0}; /* Dataspace dimensions */
+ symbol_t record; /* The record to add to the dataset */
+ unsigned long rec_to_flush; /* # of records left to write before flush */
+ unsigned long u, v; /* Local index variables */
+
+ HDassert(fid >= 0);
+
+ /* Reset the record */
+ /* (record's 'info' field might need to change for each record written, also) */
+ HDmemset(&record, 0, sizeof(record));
+
+ /* Create a dataspace for the record to add */
+ if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+
+ /* Create datatype for appending records */
+ if((tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Add records to random datasets, according to frequency distribution */
+ rec_to_flush = flush_count;
+ for(u = 0; u < nrecords; u++) {
+ symbol_info_t *symbol; /* Symbol to write record to */
+ hid_t file_sid; /* Dataset's space ID */
+
+ /* Get a random dataset, according to the symbol distribution */
+ symbol = choose_dataset();
+
+ /* Set the record's ID (equal to its position) */
+ record.rec_id = symbol->nrecords;
+
+ /* Get the coordinate to write */
+ start[1] = symbol->nrecords;
+
+ /* Cork the metadata cache, to prevent the object header from being
+ * flushed before the data has been written */
+ if(H5Odisable_mdc_flushes(symbol->dsid) < 0)
+ return -1;
+
+ /* Extend the dataset's dataspace to hold the new record */
+ symbol->nrecords++;
+ dim[1] = symbol->nrecords;
+ if(H5Dset_extent(symbol->dsid, dim) < 0)
+ return -1;
+
+ /* Get the dataset's dataspace */
+ if((file_sid = H5Dget_space(symbol->dsid)) < 0)
+ return -1;
+
+ /* Choose the last record in the dataset */
+ if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ return -1;
+
+ /* Write record to the dataset */
+ if(H5Dwrite(symbol->dsid, tid, mem_sid, file_sid, H5P_DEFAULT, &record) < 0)
+ return -1;
+
+ /* Uncork the metadata cache */
+ if(H5Oenable_mdc_flushes(symbol->dsid) < 0)
+ return -1;
+
+ /* Close the dataset's dataspace */
+ if(H5Sclose(file_sid) < 0)
+ return -1;
+
+ /* Check for flushing file */
+ if(flush_count > 0) {
+ /* Decrement count of records to write before flushing */
+ rec_to_flush--;
+
+ /* Check for counter being reached */
+ if(0 == rec_to_flush) {
+ /* Flush contents of file */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ return -1;
+
+ /* Reset flush counter */
+ rec_to_flush = flush_count;
+ } /* end if */
+ } /* end if */
+ } /* end for */
+
+ /* Close the memory dataspace */
+ if(H5Sclose(mem_sid) < 0)
+ return -1;
+
+ /* Close the datatype */
+ if(H5Tclose(tid) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Closing datasets\n");
+
+ /* Close the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++)
+ if(H5Dclose(symbol_info[u][v].dsid) < 0)
+ return -1;
+
+ return 0;
+} /* add_records() */
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_start_write [-f <# of records to write between flushing file contents>]\n");
+ printf(" [-i <index type>] [-c <deflate compression level>]\n");
+ printf(" [-r <random seed>] [-q] <# of records>\n");
+ printf("\n");
+ printf("<# of records to write between flushing file contents> should be 0\n");
+ printf("(for no flushing) or between 1 and (<# of records> - 1).\n");
+ printf("\n");
+ printf("<index type> should be b2 or ea\n");
+ printf("\n");
+ printf("<deflate compression level> should be -1 (for no compression) or 0-9\n");
+ printf("\n");
+ printf("<# of records> must be specified.\n");
+ printf("\n");
+ printf("Defaults to flushing every 10000 records ('-f 10000'),\n");
+    printf("v1 b-tree indexing (no '-i' given), no compression ('-c -1'),\n");
+ printf("will generate a random seed (no -r given), and verbose (no '-q' given)\n");
+ printf("\n");
+ HDexit(1);
+} /* usage() */
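+
+/* Example invocation (values are illustrative only): write 20000 records,
+ * flushing the file every 5000 records and using the extensible-array chunk
+ * index:
+ *
+ *      ./swmr_start_write -i ea -f 5000 20000
+ */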
+
+/*
+ * Can test with different scenarios as below:
+ * 1) create_file(), create_datasets(), H5Fstart_swmr_write(), add_records(), H5Fclose().
+ * 2) create_file(), create_close_datasets(), open_datasets(), H5Fstart_swmr_write(), add_records(), H5Fclose().
+ * 3) create_file(), create_close_datasets(), H5Fclose(),
+ *    open_file(), open_datasets(), H5Fstart_swmr_write(), add_records(), H5Fclose().
+ */
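+/*
+ * For example, scenario 3 corresponds roughly to this call sequence (argument
+ * lists omitted and error checking elided; for illustration only):
+ *
+ *      fid = create_file(...);
+ *      create_close_datasets(...);
+ *      H5Fclose(fid);
+ *      fid = open_file(...);
+ *      open_datasets(...);
+ *      H5Fstart_swmr_write(fid);
+ *      add_records(...);
+ *      H5Fclose(fid);
+ */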
+int main(int argc, const char *argv[])
+{
+ hid_t fid; /* File ID for file opened */
+ long nrecords = 0; /* # of records to append */
+ long flush_count = 10000; /* # of records to write between flushing file */
+ hbool_t verbose = TRUE; /* Whether to emit some informational messages */
+ FILE *verbose_file = NULL; /* File handle for verbose output */
+ hbool_t use_seed = FALSE; /* Set to TRUE if a seed was set on the command line */
+ unsigned random_seed = 0; /* Random # seed */
+ int comp_level = -1; /* Compression level (-1 is no compression) */
+ const char *index_type = "b1"; /* Chunk index type */
+ unsigned u; /* Local index variable */
+ int temp; /* Temporary variable */
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* Compress dataset chunks */
+ case 'c':
+ comp_level = HDatoi(argv[u + 1]);
+ if(comp_level < -1 || comp_level > 9)
+ usage();
+ u += 2;
+ break;
+
+ /* Chunk index type */
+ case 'i':
+ index_type = argv[u + 1];
+ if(HDstrcmp(index_type, "ea")
+ && HDstrcmp(index_type, "b2"))
+ usage();
+ u += 2;
+ break;
+
+ /* # of records to write between flushing file */
+ case 'f':
+ flush_count = HDatol(argv[u + 1]);
+ if(flush_count < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = FALSE;
+ u++;
+ break;
+
+ /* Random # seed */
+ case 'r':
+ use_seed = TRUE;
+ temp = HDatoi(argv[u + 1]);
+ if(temp < 0)
+ usage();
+ else
+ random_seed = (unsigned)temp;
+ u += 2;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of records to append */
+ nrecords = HDatol(argv[u]);
+ if(nrecords <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+
+ if(nrecords <= 0)
+ usage();
+ if(flush_count >= nrecords)
+ usage();
+
+ /* Set the random seed */
+ if(!use_seed) {
+ struct timeval t;
+
+ HDgettimeofday(&t, NULL);
+ random_seed = (unsigned)(t.tv_usec);
+ } /* end if */
+ HDsrandom(random_seed);
+
+ /* Open output file */
+ if(verbose) {
+ char verbose_name[1024];
+
+ HDsnprintf(verbose_name, sizeof(verbose_name), "swmr_writer.out.%u", random_seed);
+ if(NULL == (verbose_file = HDfopen(verbose_name, "w"))) {
+ HDfprintf(stderr, "Can't open verbose output file!\n");
+ HDexit(1);
+ }
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose) {
+ HDfprintf(verbose_file, "Parameters:\n");
+ HDfprintf(verbose_file, "\tindex type = %s\n", index_type);
+ HDfprintf(verbose_file, "\tcompression level = %d\n", comp_level);
+ HDfprintf(verbose_file, "\t# of records between flushes = %ld\n", flush_count);
+ HDfprintf(verbose_file, "\t# of records to write = %ld\n", nrecords);
+ } /* end if */
+
+ /* ALWAYS emit the random seed for possible debugging */
+ HDfprintf(stdout, "Using writer random seed: %u\n", random_seed);
+
+ /* Create the test file */
+ if((fid = create_file(FILENAME, verbose, verbose_file, random_seed)) < 0) {
+ HDfprintf(stderr, "Error creating the file...\n");
+ HDexit(1);
+ }
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0)
+ return -1;
+
+ /* Create the datasets in the file */
+ if(create_datasets(fid, comp_level, verbose, verbose_file, index_type) < 0) {
+ HDfprintf(stderr, "Error creating datasets...\n");
+ HDexit(1);
+ }
+
+ /* Enable SWMR writing mode */
+ if(H5Fstart_swmr_write(fid) < 0) {
+ HDfprintf(stderr, "Error starting SWMR writing mode...\n");
+ HDexit(1);
+ }
+
+    /* Send a message to indicate "H5Fstart_swmr_write" is complete--releasing the file lock */
+ h5_send_message(WRITER_MESSAGE, NULL, NULL);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Adding records\n");
+
+ /* Append records to datasets */
+ if(add_records(fid, verbose, verbose_file, (unsigned long)nrecords, (unsigned long)flush_count) < 0) {
+ HDfprintf(stderr, "Error appending records to datasets!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ HDfprintf(stderr, "Error releasing symbols!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Closing the file\n");
+
+ /* Close objects opened */
+ if(H5Fclose(fid) < 0) {
+ HDfprintf(stderr, "Error closing file!\n");
+ HDexit(1);
+ } /* end if */
+
+ return 0;
+} /* main() */
+
diff --git a/test/swmr_writer.c b/test/swmr_writer.c
new file mode 100644
index 0000000..4700e2f
--- /dev/null
+++ b/test/swmr_writer.c
@@ -0,0 +1,450 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_writer.c
+ *
+ * Purpose: Writes data to a randomly selected subset of the datasets
+ * in the SWMR test file.
+ *
+ * This program is intended to run concurrently with the
+ * swmr_reader program.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include "h5test.h"
+#include "swmr_common.h"
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static hid_t open_skeleton(const char *filename, hbool_t verbose, FILE *verbose_file,
+ unsigned random_seed, hbool_t old);
+static int add_records(hid_t fid, hbool_t verbose, FILE *verbose_file,
+ unsigned long nrecords, unsigned long flush_count);
+static void usage(void);
+
+
+/*-------------------------------------------------------------------------
+ * Function: open_skeleton
+ *
+ * Purpose: Opens the SWMR HDF5 file and datasets.
+ *
+ * Parameters: const char *filename
+ * The filename of the SWMR HDF5 file to open
+ *
+ * hbool_t verbose
+ * Whether or not to emit verbose console messages
+ *
+ * FILE *verbose_file
+ * File handle for verbose output
+ *
+ * unsigned random_seed
+ * Random seed for the file (used for verbose logging)
+ *
+ * hbool_t old
+ *              Whether to open the file with the "old" (non-latest) file format
+ *
+ * Return: Success: The file ID of the opened SWMR file
+ * The dataset IDs are stored in a global array
+ *
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static hid_t
+open_skeleton(const char *filename, hbool_t verbose, FILE *verbose_file,
+ unsigned random_seed, hbool_t old)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t fapl; /* File access property list */
+ unsigned u, v; /* Local index variable */
+
+ HDassert(filename);
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ if(!old) {
+ /* Set to use the latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+ }
+
+#ifdef QAK
+ /* Increase the initial size of the metadata cache */
+ {
+ H5AC_cache_config_t mdc_config;
+
+ mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ H5Pget_mdc_config(fapl, &mdc_config);
+ HDfprintf(stderr, "mdc_config.initial_size = %lu\n", (unsigned long)mdc_config.initial_size);
+ HDfprintf(stderr, "mdc_config.epoch_length = %lu\n", (unsigned long)mdc_config.epoch_length);
+ mdc_config.set_initial_size = 1;
+ mdc_config.initial_size = 16 * 1024 * 1024;
+ /* mdc_config.epoch_length = 5000; */
+ H5Pset_mdc_config(fapl, &mdc_config);
+ }
+#endif /* QAK */
+
+ if(verbose) {
+ char verbose_name[1024];
+
+ HDsnprintf(verbose_name, sizeof(verbose_name), "swmr_writer.log.%u", random_seed);
+
+ H5Pset_fapl_log(fapl, verbose_name, H5FD_LOG_ALL, (size_t)(512 * 1024 * 1024));
+ } /* end if */
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, fapl)) < 0)
+ return -1;
+
+ /* Close file access property list */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Opening datasets\n");
+
+ /* Open the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+ if((symbol_info[u][v].dsid = H5Dopen2(fid, symbol_info[u][v].name, H5P_DEFAULT)) < 0)
+ return -1;
+ symbol_info[u][v].nrecords = 0;
+ } /* end for */
+
+ return fid;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: add_records
+ *
+ * Purpose: Writes a specified number of records to random datasets in
+ * the SWMR test file.
+ *
+ * Parameters: hid_t fid
+ * The file ID of the SWMR HDF5 file
+ *
+ * hbool_t verbose
+ * Whether or not to emit verbose console messages
+ *
+ * FILE *verbose_file
+ * File handle for verbose output
+ *
+ * unsigned long nrecords
+ * # of records to write to the datasets
+ *
+ * unsigned long flush_count
+ * # of records to write before flushing the file to disk
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+add_records(hid_t fid, hbool_t verbose, FILE *verbose_file,
+ unsigned long nrecords, unsigned long flush_count)
+{
+ hid_t tid; /* Datatype ID for records */
+ hid_t mem_sid; /* Memory dataspace ID */
+ hsize_t start[2] = {0, 0}, count[2] = {1, 1}; /* Hyperslab selection values */
+ hsize_t dim[2] = {1, 0}; /* Dataspace dimensions */
+ symbol_t record; /* The record to add to the dataset */
+ unsigned long rec_to_flush; /* # of records left to write before flush */
+ unsigned long u, v; /* Local index variables */
+
+ HDassert(fid >= 0);
+
+ /* Reset the record */
+ /* (record's 'info' field might need to change for each record written, also) */
+ HDmemset(&record, 0, sizeof(record));
+
+ /* Create a dataspace for the record to add */
+ if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+
+ /* Create datatype for appending records */
+ if((tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Add records to random datasets, according to frequency distribution */
+ rec_to_flush = flush_count;
+ for(u = 0; u < nrecords; u++) {
+ symbol_info_t *symbol; /* Symbol to write record to */
+ hid_t file_sid; /* Dataset's space ID */
+
+ /* Get a random dataset, according to the symbol distribution */
+ symbol = choose_dataset();
+
+ /* Set the record's ID (equal to its position) */
+ record.rec_id = symbol->nrecords;
+
+ /* Get the coordinate to write */
+ start[1] = symbol->nrecords;
+
+ /* Cork the metadata cache, to prevent the object header from being
+ * flushed before the data has been written */
+ if(H5Odisable_mdc_flushes(symbol->dsid) < 0)
+ return -1;
+
+ /* Extend the dataset's dataspace to hold the new record */
+ symbol->nrecords++;
+ dim[1] = symbol->nrecords;
+ if(H5Dset_extent(symbol->dsid, dim) < 0)
+ return -1;
+
+ /* Get the dataset's dataspace */
+ if((file_sid = H5Dget_space(symbol->dsid)) < 0)
+ return -1;
+
+ /* Choose the last record in the dataset */
+ if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ return -1;
+
+ /* Write record to the dataset */
+ if(H5Dwrite(symbol->dsid, tid, mem_sid, file_sid, H5P_DEFAULT, &record) < 0)
+ return -1;
+
+ /* Uncork the metadata cache */
+ if(H5Oenable_mdc_flushes(symbol->dsid) < 0)
+ return -1;
+
+ /* Close the dataset's dataspace */
+ if(H5Sclose(file_sid) < 0)
+ return -1;
+
+ /* Check for flushing file */
+ if(flush_count > 0) {
+ /* Decrement count of records to write before flushing */
+ rec_to_flush--;
+
+ /* Check for counter being reached */
+ if(0 == rec_to_flush) {
+ /* Flush contents of file */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ return -1;
+
+ /* Reset flush counter */
+ rec_to_flush = flush_count;
+ } /* end if */
+ } /* end if */
+ } /* end for */
+
+ /* Close the memory dataspace */
+ if(H5Sclose(mem_sid) < 0)
+ return -1;
+
+ /* Close the datatype */
+ if(H5Tclose(tid) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Closing datasets\n");
+
+ /* Close the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++)
+ if(H5Dclose(symbol_info[u][v].dsid) < 0)
+ return -1;
+
+ return 0;
+}
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_writer [-q] [-o] [-f <# of records to write between flushing\n");
+ printf(" file contents>] [-r <random seed>] <# of records>\n");
+ printf("\n");
+ printf("<# of records to write between flushing file contents> should be 0\n");
+ printf("(for no flushing) or between 1 and (<# of records> - 1).\n");
+ printf("\n");
+ printf("<# of records> must be specified.\n");
+ printf("\n");
+ printf("Defaults to verbose (no '-q' given), latest format when opening file (no '-o' given),\n");
+ printf("flushing every 10000 records ('-f 10000'), and will generate a random seed (no -r given).\n");
+ printf("\n");
+ HDexit(1);
+}
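+
+/* Example invocation (values are illustrative only): append 20000 records to
+ * the existing SWMR test file, flushing the file every 5000 records:
+ *
+ *      ./swmr_writer -f 5000 20000
+ */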
+
+int main(int argc, const char *argv[])
+{
+ hid_t fid; /* File ID for file opened */
+ long nrecords = 0; /* # of records to append */
+ long flush_count = 10000; /* # of records to write between flushing file */
+ hbool_t verbose = TRUE; /* Whether to emit some informational messages */
+ FILE *verbose_file = NULL; /* File handle for verbose output */
+ hbool_t old = FALSE; /* Whether to use non-latest-format when opening file */
+ hbool_t use_seed = FALSE; /* Set to TRUE if a seed was set on the command line */
+ unsigned random_seed = 0; /* Random # seed */
+ unsigned u; /* Local index variable */
+ int temp;
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of records to write between flushing file */
+ case 'f':
+ flush_count = HDatol(argv[u + 1]);
+ if(flush_count < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = FALSE;
+ u++;
+ break;
+
+ /* Random # seed */
+ case 'r':
+ use_seed = TRUE;
+ temp = HDatoi(argv[u + 1]);
+ random_seed = (unsigned)temp;
+ u += 2;
+ break;
+
+ /* Use non-latest-format when opening file */
+ case 'o':
+ old = TRUE;
+ u++;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of records to append */
+ nrecords = HDatol(argv[u]);
+ if(nrecords <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+ if(nrecords <= 0)
+ usage();
+ if(flush_count >= nrecords)
+ usage();
+
+ /* Set the random seed */
+ if(!use_seed) {
+ struct timeval t;
+
+ HDgettimeofday(&t, NULL);
+ random_seed = (unsigned)(t.tv_usec);
+ } /* end if */
+ HDsrandom(random_seed);
+
+ /* Open output file */
+ if(verbose) {
+ char verbose_name[1024];
+
+ HDsnprintf(verbose_name, sizeof(verbose_name), "swmr_writer.out.%u", random_seed);
+ if(NULL == (verbose_file = HDfopen(verbose_name, "w"))) {
+ HDfprintf(stderr, "Can't open verbose output file!\n");
+ HDexit(1);
+ }
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose) {
+ HDfprintf(verbose_file, "Parameters:\n");
+ HDfprintf(verbose_file, "\t# of records between flushes = %ld\n", flush_count);
+ HDfprintf(verbose_file, "\t# of records to write = %ld\n", nrecords);
+ } /* end if */
+
+ /* ALWAYS emit the random seed for possible debugging */
+ HDfprintf(stdout, "Using writer random seed: %u\n", random_seed);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Opening skeleton file: %s\n", FILENAME);
+
+ /* Open file skeleton */
+ if((fid = open_skeleton(FILENAME, verbose, verbose_file, random_seed, old)) < 0) {
+ HDfprintf(stderr, "Error opening skeleton file!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Send a message to indicate "H5Fopen" is complete--releasing the file lock */
+ h5_send_message(WRITER_MESSAGE, NULL, NULL);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Adding records\n");
+
+ /* Append records to datasets */
+ if(add_records(fid, verbose, verbose_file, (unsigned long)nrecords, (unsigned long)flush_count) < 0) {
+ HDfprintf(stderr, "Error appending records to datasets!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ HDfprintf(stderr, "Error releasing symbols!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Closing objects\n");
+
+ /* Close objects opened */
+ if(H5Fclose(fid) < 0) {
+ HDfprintf(stderr, "Error closing file!\n");
+ HDexit(1);
+ } /* end if */
+
+ return 0;
+}
+
diff --git a/test/test_usecases.sh.in b/test/test_usecases.sh.in
new file mode 100644
index 0000000..1cae191
--- /dev/null
+++ b/test/test_usecases.sh.in
@@ -0,0 +1,170 @@
+#! /bin/bash
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the files COPYING and Copyright.html. COPYING can be found at the root
+# of the source code distribution tree; Copyright.html can be found at the
+# root level of an installed copy of the electronic HDF5 document set and
+# is linked from the top-level documents page. It can also be found at
+# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
+# access to either file, you may request a copy from help@hdfgroup.org.
+#
+# Tests the use cases of swmr features.
+#
+# Created:
+# Albert Cheng, 2013/06/01.
+# Modified:
+#
+
+# This is work in progress.
+# For now, it shows how to run the use case test programs. It only verifies that
+# the exit codes are okay (0).
+
+srcdir=@srcdir@
+
+# Check to see if the VFD specified by the HDF5_DRIVER environment variable
+# supports SWMR.
+./swmr_check_compat_vfd
+rc=$?
+if [[ $rc != 0 ]] ; then
+ echo
+ echo "The VFD specified by the HDF5_DRIVER environment variable"
+ echo "does not support SWMR"
+ echo
+ echo "SWMR use case tests skipped"
+ echo
+ exit 0
+fi
+
+# Define symbols
+EXIT_SUCCESS=0
+EXIT_FAILURE=1
+EXIT_VALUE=$EXIT_SUCCESS # Default all tests succeed
+RESULT_PASSED=" PASSED"
+RESULT_FAILED="*FAILED*"
+RESULT_SKIP="-SKIP-"
+USECASES_PROGRAMS="use_append_chunk use_append_mchunks"
+TESTNAME="Use Case"
+
+# Define variables
+nerrors=0
+verbose=yes
+
+# Source in the output filter function definitions.
+. $srcdir/../bin/output_filter.sh
+
+# Define functions
+# Print a one-line message left justified in a field of 72 characters.
+# Results can be " PASSED", "*FAILED*", "-SKIP-", up to 8 characters
+# wide.
+# SPACES should be at least 71 spaces. ($* + ' ' + 71 + 8 >= 80)
+#
+TESTING() {
+ SPACES=" "
+ echo "$* $SPACES" | cut -c1-72 | tr -d '\012'
+}
+
+# Run a test and print PASSED or *FAILED*. If a test fails then increment
+# the `nerrors' global variable. The test's stdout and stderr are captured
+# in $program.out and $program.err, filtered, and (if $verbose is set)
+# displayed. The output files are not removed if $HDF5_NOCLEANUP is set to a
+# non-empty value.
+TOOLTEST() {
+ program=$1
+ shift
+
+ actual="$program.out"
+ actual_err="$program.err"
+ actual_sav=${actual}-sav
+ actual_err_sav=${actual_err}-sav
+
+ # Run test.
+ TESTING $program $@
+ (
+ $RUNSERIAL ./$program "$@"
+ ) >$actual 2>$actual_err
+ exit_code=$?
+
+ # save actual and actual_err in case they are needed later.
+ cp $actual $actual_sav
+ STDOUT_FILTER $actual
+ cp $actual_err $actual_err_sav
+ STDERR_FILTER $actual_err
+ cat $actual_err >> $actual
+
+ if [ $exit_code -eq 0 ];then
+ echo "$RESULT_PASSED"
+ test yes = "$verbose" && sed 's/^/ /' < $actual
+ else
+ echo "$RESULT_FAILED"
+ nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && sed 's/^/ /' < $actual
+ fi
+
+ # Clean up output file
+ if test -z "$HDF5_NOCLEANUP"; then
+ rm -f $actual $actual_err $actual_sav $actual_err_sav $actual_ext
+ fi
+}
+
+# run tests for H5Odisable_mdc_flushes/H5Oenable_mdc_flushes/H5Oare_mdc_flushes_disabled here temporarily
+USECORK=use_disable_mdc_flushes
+for p in $USECORK; do
+ TOOLTEST $p
+ TOOLTEST $p -y 3
+ TOOLTEST $p -n 3000
+ TOOLTEST $p -n 5000
+done
+
+# run the write order test here temporarily
+WRITEORDER=twriteorder
+for p in $WRITEORDER; do
+ TOOLTEST $p
+ TOOLTEST $p -b 1000
+ TOOLTEST $p -p 3000
+ TOOLTEST $p -n 2000
+ TOOLTEST $p -l w
+ TOOLTEST $p -l r
+done
+
+# Report test results
+if test $nerrors -eq 0 ; then
+    echo "$USECORK and $WRITEORDER tests passed."
+else
+    echo "$USECORK and $WRITEORDER tests failed with $nerrors errors."
+ EXIT_VALUE=$EXIT_FAILURE
+ nerrors=0 # reset nerror for the regular tests below.
+fi
+
+# main body
+for p in $USECASES_PROGRAMS; do
+ TOOLTEST ./$p
+ TOOLTEST ./$p -z 256
+    tmpfile=/tmp/datafile.$$
+ TOOLTEST ./$p -f $tmpfile; rm -f $tmpfile
+ TOOLTEST ./$p -l w
+ TOOLTEST ./$p -l r
+    # use case 1.9, testing with multi-plane chunks
+ TOOLTEST ./$p -z 256 -y 5 # 5 planes chunks
+ # cleanup temp datafile
+ if test -z "$HDF5_NOCLEANUP"; then
+ rm -f $p.h5
+ fi
+done
+
+
+# Report test results and exit
+if test $nerrors -eq 0 ; then
+ echo "All $TESTNAME tests passed."
+else
+ echo "$TESTNAME tests failed with $nerrors errors."
+ EXIT_VALUE=$EXIT_FAILURE
+fi
+
+exit $EXIT_VALUE
diff --git a/test/testswmr.sh.in b/test/testswmr.sh.in
new file mode 100644
index 0000000..c4a75e8
--- /dev/null
+++ b/test/testswmr.sh.in
@@ -0,0 +1,529 @@
+#! /bin/bash
+#
+# Copyright by The HDF Group.
+# Copyright by the Board of Trustees of the University of Illinois.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the files COPYING and Copyright.html. COPYING can be found at the root
+# of the source code distribution tree; Copyright.html can be found at the
+# root level of an installed copy of the electronic HDF5 document set and
+# is linked from the top-level documents page. It can also be found at
+# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
+# access to either file, you may request a copy from help@hdfgroup.org.
+#
+# Tests for the swmr feature.
+#
+# Created:
+# Albert Cheng, 2009/07/22
+
+srcdir=@srcdir@
+
+###############################################################################
+## test parameters
+###############################################################################
+
+Nreaders=5 # number of readers to launch
+Nrdrs_spa=3 # number of sparse readers to launch
+Nrecords=200000 # number of records to write
+Nrecs_rem=40000 # number of times to shrink
+Nrecs_spa=20000 # number of records to write in the sparse test
+Nsecs_add=5 # number of seconds per read interval
+Nsecs_rem=3 # number of seconds per read interval
+Nsecs_addrem=8 # number of seconds per read interval
+nerrors=0
+
+###############################################################################
+## definitions for message file to coordinate test runs
+###############################################################################
+WRITER_MESSAGE=SWMR_WRITER_MESSAGE # The message file created by writer that the open is complete
+ # This should be the same as the define in "./swmr_common.h"
+MESSAGE_TIMEOUT=300 # Message timeout length in secs
+ # This should be the same as the define in "./h5test.h"
+
+###############################################################################
+## short hands and function definitions
+###############################################################################
+DPRINT=: # Set to "echo Debug:" for debugging printing,
+ # else ":" for noop.
+IFDEBUG=: # Set to null to turn on debugging, else ":" for noop.
+
+# Print a one-line message left justified in a field of 70 characters
+# beginning with the word "Testing".
+#
+TESTING() {
+ SPACES=" "
+ echo "Testing $* $SPACES" | cut -c1-70 | tr -d '\012'
+}
+
+# Wait for the writer message file or until the maximum # of seconds is reached
+# $1 is the message file to wait for
+# This performs a similar function to the routine h5_wait_message() in test/h5test.c
+WAIT_MESSAGE() {
+ message=$1 # Get the name of the message file to wait for
+ t0=`date +%s` # Get current time in seconds
+ difft=0 # Initialize the time difference
+ mexist=0 # Indicate whether the message file is found
+ while [ $difft -lt $MESSAGE_TIMEOUT ] ; # Loop till message times out
+ do
+ t1=`date +%s` # Get current time in seconds
+ difft=`expr $t1 - $t0` # Calculate the time difference
+ if [ -e $message ]; then # If message file is found:
+ mexist=1 # indicate the message file is found
+ rm $message # remove the message file
+ break # get out of the while loop
+ fi
+ done;
+ if test $mexist -eq 0; then
+ # Issue warning that the writer message file is not found, continue with launching the reader(s)
+ echo warning: $WRITER_MESSAGE is not found after waiting $MESSAGE_TIMEOUT seconds
+ else
+ echo $WRITER_MESSAGE is found
+ fi
+}
+
+###############################################################################
+## Main
+##
+## Modifications:
+## Vailin Choi; July 2013
+##     Add waiting for the writer message file before launching the reader(s).
+## Due to the implementation of file locking, coordination
+## is needed in file opening for the writer/reader tests
+## to proceed as expected.
+##
+###############################################################################
+# The build (current) directory might be different than the source directory.
+if test -z "$srcdir"; then
+ srcdir=.
+fi
+
+# Check to see if the VFD specified by the HDF5_DRIVER environment variable
+# supports SWMR.
+./swmr_check_compat_vfd
+rc=$?
+if [ $rc -ne 0 ] ; then
+ echo
+ echo "The VFD specified by the HDF5_DRIVER environment variable"
+ echo "does not support SWMR."
+ echo
+ echo "SWMR acceptance tests skipped"
+ echo
+ exit 0
+fi
+
+# Parse options (none accepted at this time)
+while [ $# -gt 0 ]; do
+ case "$1" in
+ *) # unknown option
+ echo "$0: Unknown option ($1)"
+ exit 1
+ ;;
+ esac
+done
+
+# Loop over index types
+for index_type in "-i ea" "-i b2"
+do
+ # Try with and without compression
+ for compress in "" "-c 5"
+ do
+ echo
+ echo "*******************************************************************************"
+ echo "** Loop testing parameters: $index_type $compress"
+ echo "*******************************************************************************"
+ echo
+ echo
+ echo "###############################################################################"
+ echo "## Generator test"
+ echo "###############################################################################"
+ # Launch the Generator without SWMR_WRITE
+ echo launch the swmr_generator
+ ./swmr_generator $compress $index_type
+ if test $? -ne 0; then
+ echo generator had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Launch the Generator with SWMR_WRITE
+ echo launch the swmr_generator with SWMR_WRITE
+ ./swmr_generator -s $compress $index_type
+ if test $? -ne 0; then
+ echo generator had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+    # Check for error and exit if one occurred
+ $DPRINT nerrors=$nerrors
+ if test $nerrors -ne 0 ; then
+ echo "SWMR tests failed with $nerrors errors."
+ exit 1
+ fi
+
+ echo
+ echo "###############################################################################"
+ echo "## Use H5Fstart_swmr_write() to enable SWMR writing mode"
+ echo "###############################################################################"
+
+ # Remove any possible writer message file before launching writer
+ rm -f $WRITER_MESSAGE
+ #
+ # Launch the Writer
+    echo launch the swmr_start_write
+ seed="" # Put -r <random seed> command here
+ ./swmr_start_write $compress $index_type $Nrecords $seed 2>&1 |tee swmr_writer.out &
+ pid_writer=$!
+ $DPRINT pid_writer=$pid_writer
+
+ # Wait for message from writer process before starting reader(s)
+ WAIT_MESSAGE $WRITER_MESSAGE
+
+ #
+ # Launch the Readers
+ #declare -a seeds=(<seed1> <seed2> <seed3> ... )
+ echo launch $Nreaders swmr_readers
+ pid_readers=""
+ n=0
+ while [ $n -lt $Nreaders ]; do
+ #seed="-r ${seeds[$n]}"
+ seed=""
+ ./swmr_reader $Nsecs_add $seed 2>&1 |tee swmr_reader.out.$n &
+ pid_readers="$pid_readers $!"
+ n=`expr $n + 1`
+ done
+ $DPRINT pid_readers=$pid_readers
+ $IFDEBUG ps
+
+ # Collect exit code of the readers first because they usually finish
+ # before the writer.
+ for xpid in $pid_readers; do
+ $DPRINT checked reader $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo reader had error
+ nerrors=`expr $nerrors + 1`
+ fi
+ done
+
+ # Collect exit code of the writer
+ $DPRINT checked writer $pid_writer
+ wait $pid_writer
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+    # Check for error and exit if one occurred
+ $DPRINT nerrors=$nerrors
+ if test $nerrors -ne 0 ; then
+ echo "SWMR tests failed with $nerrors errors."
+ echo "(Writer and reader output preserved)"
+ exit 1
+ fi
+
+ # Clean up output files
+ rm -f swmr_writer.out
+ rm -f swmr_reader.out.*
+
+ echo
+ echo "###############################################################################"
+ echo "## Writer test - test expanding the dataset"
+ echo "###############################################################################"
+
+ # Launch the Generator
+ echo launch the swmr_generator
+ ./swmr_generator -s $compress $index_type
+ if test $? -ne 0; then
+ echo generator had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Remove any possible writer message file before launching writer
+ rm -f $WRITER_MESSAGE
+ #
+ # Launch the Writer
+ echo launch the swmr_writer
+ seed="" # Put -r <random seed> command here
+ ./swmr_writer -o $Nrecords $seed 2>&1 |tee swmr_writer.out &
+ pid_writer=$!
+ $DPRINT pid_writer=$pid_writer
+
+ # Wait for message from writer process before starting reader(s)
+ WAIT_MESSAGE $WRITER_MESSAGE
+ #
+ # Launch the Readers
+ #declare -a seeds=(<seed1> <seed2> <seed3> ... )
+ echo launch $Nreaders swmr_readers
+ pid_readers=""
+ n=0
+ while [ $n -lt $Nreaders ]; do
+ #seed="-r ${seeds[$n]}"
+ seed=""
+ ./swmr_reader $Nsecs_add $seed 2>&1 |tee swmr_reader.out.$n &
+ pid_readers="$pid_readers $!"
+ n=`expr $n + 1`
+ done
+ $DPRINT pid_readers=$pid_readers
+ $IFDEBUG ps
+
+ # Collect exit code of the readers first because they usually finish
+ # before the writer.
+ for xpid in $pid_readers; do
+ $DPRINT checked reader $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo reader had error
+ nerrors=`expr $nerrors + 1`
+ fi
+ done
+
+ # Collect exit code of the writer
+ $DPRINT checked writer $pid_writer
+ wait $pid_writer
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+    # Check for error and exit if one occurred
+ $DPRINT nerrors=$nerrors
+ if test $nerrors -ne 0 ; then
+ echo "SWMR tests failed with $nerrors errors."
+ echo "(Writer and reader output preserved)"
+ exit 1
+ fi
+
+ # Clean up output files
+ rm -f swmr_writer.out
+ rm -f swmr_reader.out.*
+
+ echo
+ echo "###############################################################################"
+ echo "## Remove test - test shrinking the dataset"
+ echo "###############################################################################"
+
+ # Remove any possible writer message file before launching writer
+ rm -f $WRITER_MESSAGE
+ # Launch the Remove Writer
+ echo launch the swmr_remove_writer
+ seed="" # Put -r <random seed> command here
+ ./swmr_remove_writer -o $Nrecs_rem $seed 2>&1 |tee swmr_writer.out &
+ pid_writer=$!
+ $DPRINT pid_writer=$pid_writer
+
+ # Wait for message from writer process before starting reader(s)
+ WAIT_MESSAGE $WRITER_MESSAGE
+ #
+ # Launch the Remove Readers
+ #declare -a seeds=(<seed1> <seed2> <seed3> ... )
+ n=0
+ pid_readers=""
+ echo launch $Nreaders swmr_remove_readers
+ while [ $n -lt $Nreaders ]; do
+ #seed="-r ${seeds[$n]}"
+ seed=""
+ ./swmr_remove_reader $Nsecs_rem $seed 2>&1 |tee swmr_reader.out.$n &
+ pid_readers="$pid_readers $!"
+ n=`expr $n + 1`
+ done
+ $DPRINT pid_readers=$pid_readers
+ $IFDEBUG ps
+
+ # Collect exit code of the readers first because they usually finish
+ # before the writer.
+ for xpid in $pid_readers; do
+ $DPRINT checked reader $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo reader had error
+ nerrors=`expr $nerrors + 1`
+ fi
+ done
+
+ # Collect exit code of the writer
+ $DPRINT checked writer $pid_writer
+ wait $pid_writer
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+    # Check for error and exit if one occurred
+ $DPRINT nerrors=$nerrors
+ if test $nerrors -ne 0 ; then
+ echo "SWMR tests failed with $nerrors errors."
+ echo "(Writer and reader output preserved)"
+ exit 1
+ fi
+
+ # Clean up output files
+ rm -f swmr_writer.out
+ rm -f swmr_reader.out.*
+
+ echo
+ echo "###############################################################################"
+ echo "## Add/remove test - randomly grow or shrink the dataset"
+ echo "###############################################################################"
+
+ # Launch the Generator
+ echo launch the swmr_generator
+ ./swmr_generator $compress $index_type
+ if test $? -ne 0; then
+ echo generator had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Launch the Writer (not in parallel - just to rebuild the datasets)
+ echo launch the swmr_writer
+ seed="" # Put -r <random seed> command here
+ ./swmr_writer $Nrecords $seed
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Remove any possible writer message file before launching writer
+ rm -f $WRITER_MESSAGE
+ #
+ # Launch the Add/Remove Writer
+ echo launch the swmr_addrem_writer
+ seed="" # Put -r <random seed> command here
+ ./swmr_addrem_writer $Nrecords $seed 2>&1 |tee swmr_writer.out &
+ pid_writer=$!
+ $DPRINT pid_writer=$pid_writer
+
+ # Wait for message from writer process before starting reader(s)
+ WAIT_MESSAGE $WRITER_MESSAGE
+ #
+ # Launch the Add/Remove Readers
+ #declare -a seeds=(<seed1> <seed2> <seed3> ... )
+ n=0
+ pid_readers=""
+ echo launch $Nreaders swmr_remove_readers
+ while [ $n -lt $Nreaders ]; do
+ #seed="-r ${seeds[$n]}"
+ seed=""
+ ./swmr_remove_reader $Nsecs_addrem $seed 2>&1 |tee swmr_reader.out.$n &
+ pid_readers="$pid_readers $!"
+ n=`expr $n + 1`
+ done
+ $DPRINT pid_readers=$pid_readers
+ $IFDEBUG ps
+
+ # Collect exit code of the readers first because they usually finish
+ # before the writer.
+ for xpid in $pid_readers; do
+ $DPRINT checked reader $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo reader had error
+ nerrors=`expr $nerrors + 1`
+ fi
+ done
+
+ # Collect exit code of the writer
+ $DPRINT checked writer $pid_writer
+ wait $pid_writer
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+    # Check for error and exit if one occurred
+ $DPRINT nerrors=$nerrors
+ if test $nerrors -ne 0 ; then
+ echo "SWMR tests failed with $nerrors errors."
+ echo "(Writer and reader output preserved)"
+ exit 1
+ fi
+
+ # Clean up output files
+ rm -f swmr_writer.out
+ rm -f swmr_reader.out.*
+
+ echo
+ echo "###############################################################################"
+ echo "## Sparse writer test - test writing to random locations in the dataset"
+ echo "###############################################################################"
+
+ # Launch the Generator
+ # NOTE: Random seed is shared between readers and writers and is
+ # created by the generator.
+ echo launch the swmr_generator
+ seed="" # Put -r <random seed> command here
+ ./swmr_generator $compress $index_type $seed
+ if test $? -ne 0; then
+ echo generator had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Remove any possible writer message file before launching writer
+ rm -f $WRITER_MESSAGE
+ # Launch the Sparse writer
+ echo launch the swmr_sparse_writer
+ nice -n 20 ./swmr_sparse_writer $Nrecs_spa 2>&1 |tee swmr_writer.out &
+ pid_writer=$!
+ $DPRINT pid_writer=$pid_writer
+
+ # Wait for message from writer process before starting reader(s)
+ WAIT_MESSAGE $WRITER_MESSAGE
+ #
+ # Launch the Sparse readers
+ n=0
+ pid_readers=""
+ echo launch $Nrdrs_spa swmr_sparse_readers
+ while [ $n -lt $Nrdrs_spa ]; do
+ # The sparse reader spits out a LOT of data so it's set to 'quiet'
+ ./swmr_sparse_reader -q $Nrecs_spa 2>&1 |tee swmr_reader.out.$n &
+ pid_readers="$pid_readers $!"
+ n=`expr $n + 1`
+ done
+ $DPRINT pid_readers=$pid_readers
+ $IFDEBUG ps
+
+ # Collect exit code of the writer
+ $DPRINT checked writer $pid_writer
+ wait $pid_writer
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Collect exit code of the readers
+ for xpid in $pid_readers; do
+ $DPRINT checked reader $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo reader had error
+ nerrors=`expr $nerrors + 1`
+ fi
+ done
+
+    # Check for error and exit if one occurred
+ $DPRINT nerrors=$nerrors
+ if test $nerrors -ne 0 ; then
+ echo "SWMR tests failed with $nerrors errors."
+ echo "(Writer and reader output preserved)"
+ exit 1
+ fi
+
+ # Clean up output files
+ rm -f swmr_writer.out
+ rm -f swmr_reader.out.*
+ done
+done
+
+###############################################################################
+## Report and exit
+###############################################################################
+
+$DPRINT nerrors=$nerrors
+if test $nerrors -eq 0 ; then
+ echo "SWMR tests passed."
+ exit 0
+else
+ echo "SWMR tests failed with $nerrors errors."
+ exit 1
+fi
+
diff --git a/test/testvdsswmr.sh.in b/test/testvdsswmr.sh.in
new file mode 100644
index 0000000..d69b8c0
--- /dev/null
+++ b/test/testvdsswmr.sh.in
@@ -0,0 +1,199 @@
+#! /bin/bash
+#
+# Copyright by The HDF Group.
+# Copyright by the Board of Trustees of the University of Illinois.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the files COPYING and Copyright.html. COPYING can be found at the root
+# of the source code distribution tree; Copyright.html can be found at the
+# root level of an installed copy of the electronic HDF5 document set and
+# is linked from the top-level documents page. It can also be found at
+# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
+# access to either file, you may request a copy from help@hdfgroup.org.
+#
+# Tests for the swmr feature using virtual datasets.
+#
+# Created:
+# Dana Robinson, November 2015
+
+srcdir=@srcdir@
+
+###############################################################################
+## test parameters
+###############################################################################
+
+Nwriters=6 # number of writers (1 per source dataset)
+Nreaders=5 # number of readers to launch
+nerrors=0
+
+###############################################################################
+## definitions for message file to coordinate test runs
+###############################################################################
+WRITER_MESSAGE=SWMR_WRITER_MESSAGE # The message file created by writer that the open is complete
+ # This should be the same as the define in "./swmr_common.h"
+MESSAGE_TIMEOUT=300 # Message timeout length in secs
+ # This should be the same as the define in "./h5test.h"
+
+###############################################################################
+## short hands and function definitions
+###############################################################################
+DPRINT=: # Set to "echo Debug:" for debugging printing,
+ # else ":" for noop.
+IFDEBUG=: # Set to null to turn on debugging, else ":" for noop.
+
+# Print a one-line message left justified in a field of 70 characters
+# beginning with the word "Testing".
+#
+TESTING() {
+ SPACES=" "
+ echo "Testing $* $SPACES" | cut -c1-70 | tr -d '\012'
+}
+
+# Wait for the writer message file or until the maximum # of seconds is reached
+# $1 is the message file to wait for
+# This performs a similar function to the routine h5_wait_message() in test/h5test.c
+WAIT_MESSAGE() {
+ message=$1 # Get the name of the message file to wait for
+ t0=`date +%s` # Get current time in seconds
+ difft=0 # Initialize the time difference
+ mexist=0 # Indicate whether the message file is found
+ while [ $difft -lt $MESSAGE_TIMEOUT ] ; # Loop till message times out
+ do
+ t1=`date +%s` # Get current time in seconds
+ difft=`expr $t1 - $t0` # Calculate the time difference
+ if [ -e $message ]; then # If message file is found:
+ mexist=1 # indicate the message file is found
+ rm $message # remove the message file
+ break # get out of the while loop
+ fi
+ done;
+ if test $mexist -eq 0; then
+ # Issue warning that the writer message file is not found, continue with launching the reader(s)
+ echo warning: $WRITER_MESSAGE is not found after waiting $MESSAGE_TIMEOUT seconds
+ else
+ echo $WRITER_MESSAGE is found
+ fi
+}
+
+###############################################################################
+## Main
+###############################################################################
+# The build (current) directory might be different than the source directory.
+if test -z "$srcdir"; then
+ srcdir=.
+fi
+
+# Check to see if the VFD specified by the HDF5_DRIVER environment variable
+# supports SWMR.
+./swmr_check_compat_vfd
+rc=$?
+if [ $rc -ne 0 ] ; then
+ echo
+ echo "The VFD specified by the HDF5_DRIVER environment variable"
+ echo "does not support SWMR."
+ echo
+ echo "SWMR acceptance tests skipped"
+ echo
+ exit 0
+fi
+
+# Parse options (none accepted at this time)
+while [ $# -gt 0 ]; do
+ case "$1" in
+ *) # unknown option
+ echo "$0: Unknown option ($1)"
+ exit 1
+ ;;
+ esac
+done
+
+echo
+echo "###############################################################################"
+echo "## Basic VDS SWMR test - writing to a tiled plane"
+echo "###############################################################################"
+
+# Launch the file generator
+echo launch the generator
+./vds_swmr_gen
+if test $? -ne 0; then
+ echo generator had error
+ nerrors=`expr $nerrors + 1`
+fi
+
+# Check for error and exit if one occurred
+$DPRINT nerrors=$nerrors
+if test $nerrors -ne 0 ; then
+ echo "VDS SWMR tests failed with $nerrors errors."
+ exit 1
+fi
+
+# Launch the writers
+echo "launch the $Nwriters SWMR VDS writers (1 per source)"
+pid_writers=""
+n=0
+while [ $n -lt $Nwriters ]; do
+ ./vds_swmr_writer $n &
+ pid_writers="$pid_writers $!"
+ n=`expr $n + 1`
+done
+$DPRINT pid_writers=$pid_writers
+$IFDEBUG ps
+
+# Sleep to ensure that the writers have started
+sleep 3
+
+# Launch the readers
+echo launch $Nreaders SWMR readers
+pid_readers=""
+n=0
+while [ $n -lt $Nreaders ]; do
+ ./vds_swmr_reader &
+ pid_readers="$pid_readers $!"
+ n=`expr $n + 1`
+done
+$DPRINT pid_readers=$pid_readers
+$IFDEBUG ps
+
+# Collect exit code of the writers
+for xpid in $pid_writers; do
+ $DPRINT checked writer $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+done
+
+# Collect exit code of the readers
+# (they usually finish after the writers)
+for xpid in $pid_readers; do
+ $DPRINT checked reader $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo reader had error
+ nerrors=`expr $nerrors + 1`
+ fi
+done
+
+# Check for error and exit if one occurred
+$DPRINT nerrors=$nerrors
+if test $nerrors -ne 0 ; then
+ echo "VDS SWMR tests failed with $nerrors errors."
+ exit 1
+fi
+
+###############################################################################
+## Report and exit
+###############################################################################
+
+$DPRINT nerrors=$nerrors
+if test $nerrors -eq 0 ; then
+ echo "VDS SWMR tests passed."
+ exit 0
+else
+ echo "VDS SWMR tests failed with $nerrors errors."
+ exit 1
+fi
+
diff --git a/test/twriteorder.c b/test/twriteorder.c
new file mode 100644
index 0000000..58690f6
--- /dev/null
+++ b/test/twriteorder.c
@@ -0,0 +1,463 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/***********************************************************
+*
+* Test program: twriteorder
+*
+* Test to verify that the write order is strictly consistent.
+* The SWMR feature requires that the order of write is strictly consistent.
+* "Strict consistency in computer science is the most stringent consistency
+* model. It says that a read operation has to return the result of the
+* latest write operation which occurred on that data item."--
+* (http://en.wikipedia.org/wiki/Linearizability#Definition_of_linearizability).
+* This is also an alternative form of what POSIX write requires: after a
+* write operation has returned success, all reads issued afterward should
+* see the data that the write has written.
+*
+* Created: Albert Cheng, 2013/8/28.
+* Modified:
+*************************************************************/
+
+/***********************************************************
+*
+* Algorithm
+*
+* The test simulates what SWMR does by writing chained blocks and seeing if
+* they can be read back correctly.
+* There is a writer process and multiple read processes.
+* The file is divided into 2KB partitions. The writer then writes one chained
+* block, each 1KB in size, in each partition after the first partition.
+* Each chained block has this structure:
+* Byte 0-3: offset address of its child block. The last child uses 0 as NULL.
+* Byte 4-1023: some artificial data.
+* The child block address of Block 1 is NULL (0).
+* The child block address of Block 2 is the offset address of Block 1.
+* The child block address of Block n is the offset address of Block n-1.
+* After all n blocks are written, the offset address of Block n is written
+* to the offset 0 of the first partition.
+* Therefore, by the time the offset address of Block n is written to this
+* position, all n chain-linked blocks have been written.
+*
+* The other reader processes will try to read the address value at the
+* offset 0. The value is initially NULL(0). When it changes to non-zero,
+* it signifies the writer process has written all the chain-link blocks
+* and they are ready for the reader processes to access.
+*
+* If the system, in which the writer and reader processes run, maintains the
+* write order, the readers will always read all chain-linked blocks correctly.
+* If the write order is not maintained, some reader processes may find
+* unexpected block data.
+*
+*************************************************************/
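+
+/***********************************************************
+*
+* For illustration only, with the default 2KB partitions and 1KB blocks the
+* file produced by the writer is laid out roughly as follows, where the first
+* 4 bytes of each block hold the offset address of its child block:
+*
+*   offset 0          : offset address of Block n (written last)
+*   offset 1*2048 + 1 : Block 1, child address = 0 (NULL)
+*   offset 2*2048 + 2 : Block 2, child address = offset of Block 1
+*   ...
+*   offset n*2048 + n : Block n, child address = offset of Block n-1
+*
+*************************************************************/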
+
+#include "h5test.h"
+
+/* This test uses many POSIX things that are not available on
+ * Windows. We're using a check for fork(2) here as a proxy for
+ * all POSIX/Unix/Linux things until this test can be made
+ * more platform-independent.
+ */
+#ifdef H5_HAVE_FORK
+
+#define DATAFILE "twriteorder.dat"
+/* #define READERS_MAX 10 */ /* max number of readers */
+#define BLOCKSIZE_DFT 1024 /* 1KB */
+#define PARTITION_DFT 2048 /* 2KB */
+#define NLINKEDBLOCKS_DFT 512 /* default 512 */
+#define SIZE_BLKADDR 4 /* expected sizeof blkaddr */
+#define Hgoto_error(val) {ret_value=val; goto done;}
+
+/* type declarations */
+typedef enum part_t {
+ UC_READWRITE =0, /* both writer and reader */
+ UC_WRITER, /* writer only */
+ UC_READER /* reader only */
+} part_t;
+
+/* prototypes */
+int create_wo_file(void);
+int write_wo_file(void);
+int read_wo_file(void);
+void usage(const char *prog);
+int setup_parameters(int argc, char * const argv[]);
+int parse_option(int argc, char * const argv[]);
+
+/* Global Variable definitions */
+const char *progname_g="twriteorder"; /* program name */
+int write_fd_g;
+int blocksize_g, part_size_g, nlinkedblock_g;
+part_t launch_g;
+
+/* Function definitions */
+
+/* Show help page */
+void
+usage(const char *prog)
+{
+ fprintf(stderr, "usage: %s [OPTIONS]\n", prog);
+ fprintf(stderr, " OPTIONS\n");
+ fprintf(stderr, " -h Print a usage message and exit\n");
+ fprintf(stderr, " -l w|r launch writer or reader only. [default: launch both]\n");
+ fprintf(stderr, " -b N Block size [default: %d]\n", BLOCKSIZE_DFT);
+ fprintf(stderr, " -p N Partition size [default: %d]\n", PARTITION_DFT);
+ fprintf(stderr, " -n N Number of linked blocks [default: %d]\n", NLINKEDBLOCKS_DFT);
+ fprintf(stderr, " where N is an integer value\n");
+ fprintf(stderr, "\n");
+}
+
+/* Setup test parameters by parsing command line options.
+ * Setup default values if not set by options. */
+int
+parse_option(int argc, char * const argv[])
+{
+ int ret_value=0;
+ int c;
+ /* command line options: See function usage for a description */
+ const char *cmd_options = "hb:l:n:p:";
+
+ /* suppress getopt from printing error */
+ opterr = 0;
+
+ while (1){
+ c = getopt (argc, argv, cmd_options);
+ if (-1 == c)
+ break;
+ switch (c) {
+ case 'h':
+ usage(progname_g);
+ exit(0);
+ break;
+      case 'b': /* block size */
+ if ((blocksize_g = atoi(optarg)) <= 0){
+ fprintf(stderr, "bad blocksize %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+      case 'n': /* number of linked blocks */
+ if ((nlinkedblock_g = atoi(optarg)) < 2){
+ fprintf(stderr, "bad number of linked blocks %s, must be greater than 1.\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+      case 'p': /* partition size */
+ if ((part_size_g = atoi(optarg)) <= 0){
+ fprintf(stderr, "bad partition size %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 'l': /* launch reader or writer only */
+ switch (*optarg) {
+ case 'r': /* reader only */
+ launch_g = UC_READER;
+ break;
+ case 'w': /* writer only */
+ launch_g = UC_WRITER;
+ break;
+ default:
+ fprintf(stderr, "launch value(%c) should be w or r only.\n", *optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ break;
+ }
+ printf("launch = %d\n", launch_g);
+ break;
+ case '?':
+ fprintf(stderr, "getopt returned '%c'.\n", c);
+ usage(progname_g);
+ Hgoto_error(-1);
+ default:
+ fprintf(stderr, "getopt returned unexpected value.\n");
+ fprintf(stderr, "Unexpected value is %d\n", c);
+ Hgoto_error(-1);
+ }
+ }
+
+    /* verify that the partition size is at least the block size */
+ if (part_size_g < blocksize_g ){
+ fprintf(stderr, "Blocksize %d should not be bigger than partition size %d\n",
+ blocksize_g, part_size_g);
+ Hgoto_error(-1);
+ }
+
+done:
+ /* All done. */
+ return(ret_value);
+}
+
+/* Setup parameters for the test case.
+ * Return: 0 succeed; -1 fail.
+ */
+int setup_parameters(int argc, char * const argv[])
+{
+ /* test case defaults */
+ blocksize_g = BLOCKSIZE_DFT;
+ part_size_g = PARTITION_DFT;
+ nlinkedblock_g = NLINKEDBLOCKS_DFT;
+ launch_g = UC_READWRITE;
+
+ /* parse options */
+ if (parse_option(argc, argv) < 0){
+ return(-1);
+ }
+
+ /* show parameters and return */
+ printf("blocksize = %ld\n", (long)blocksize_g);
+ printf("part_size = %ld\n", (long)part_size_g);
+ printf("nlinkedblock = %ld\n", (long)nlinkedblock_g);
+ printf("launch = %d\n", launch_g);
+ return(0);
+}
+
+/* Create the test file as an initially "empty" file, that is,
+ * partition 0 holds a null (0) block address.
+ *
+ * Return: 0 succeed; -1 fail.
+ */
+int create_wo_file(void)
+{
+ int blkaddr=0; /* blkaddress of next linked block */
+ int ret_code;
+
+ /* Create the data file */
+ if ((write_fd_g = HDopen(DATAFILE, O_RDWR|O_TRUNC|O_CREAT, 0664)) < 0) {
+ printf("WRITER: error from open\n");
+ return -1;
+ }
+ blkaddr=0;
+ /* write it to partition 0 */
+ if ((ret_code=HDwrite(write_fd_g, &blkaddr, (size_t)SIZE_BLKADDR)) != SIZE_BLKADDR){
+ printf("blkaddr write failed\n");
+ return -1;
+ }
+
+ /* File initialized, return success */
+ return 0;
+}
+
+int write_wo_file(void)
+{
+ int blkaddr;
+ int blkaddr_old=0;
+ int i;
+ char buffer[BLOCKSIZE_DFT];
+ int ret_code;
+
+
+ /* write block 1, 2, ... */
+ for (i=1; i<nlinkedblock_g; i++){
+ /* calculate where to write this block */
+ blkaddr = i*part_size_g + i;
+ /* store old block address in byte 0-3 */
+ HDmemcpy(&buffer[0], &blkaddr_old, sizeof(blkaddr_old));
+ /* fill the rest with the lowest byte of i */
+ HDmemset(&buffer[4], i & 0xff, (size_t) (BLOCKSIZE_DFT-4));
+ /* write the block */
+#ifdef DEBUG
+ printf("writing block at %d\n", blkaddr);
+#endif
+ HDlseek(write_fd_g, (HDoff_t)blkaddr, SEEK_SET);
+ if ((ret_code=HDwrite(write_fd_g, buffer, (size_t)blocksize_g)) != blocksize_g){
+ printf("blkaddr write failed in partition %d\n", i);
+ return -1;
+ }
+ blkaddr_old = blkaddr;
+ }
+ /* write the last blkaddr in partition 0 */
+ HDlseek(write_fd_g, (HDoff_t)0, SEEK_SET);
+ if ((ret_code=HDwrite(write_fd_g, &blkaddr_old, (size_t)sizeof(blkaddr_old))) != sizeof(blkaddr_old)){
+ printf("blkaddr write failed in partition %d\n", 0);
+ return -1;
+ }
+
+ /* all writes done. return success. */
+#ifdef DEBUG
+ printf("wrote %d blocks\n", nlinkedblock_g);
+#endif
+ return 0;
+}
+
+int read_wo_file(void)
+{
+ int read_fd;
+ int blkaddr=0;
+ int ret_code;
+ int linkedblocks_read=0;
+ char buffer[BLOCKSIZE_DFT];
+
+ /* Open the data file */
+ if ((read_fd = HDopen(DATAFILE, O_RDONLY, 0)) < 0) {
+ printf("READER: error from open\n");
+ return -1;
+ }
+ /* keep reading the initial block address until it is non-zero before proceeding. */
+ while (blkaddr == 0){
+ HDlseek(read_fd, (HDoff_t)0, SEEK_SET);
+ if ((ret_code=HDread(read_fd, &blkaddr, (size_t)sizeof(blkaddr))) != sizeof(blkaddr)){
+ printf("blkaddr read failed in partition %d\n", 0);
+ return -1;
+ }
+ }
+ linkedblocks_read++;
+
+ /* got a non-zero blkaddr. Proceed down the linked blocks. */
+#ifdef DEBUG
+ printf("got initial block address=%d\n", blkaddr);
+#endif
+ while (blkaddr != 0){
+ HDlseek(read_fd, (HDoff_t)blkaddr, SEEK_SET);
+ if ((ret_code=HDread(read_fd, buffer, (size_t)blocksize_g)) != blocksize_g){
+ printf("blkaddr read failed in partition %d\n", 0);
+ return -1;
+ }
+ linkedblocks_read++;
+ /* retrieve the block address in byte 0-3 */
+ HDmemcpy(&blkaddr, &buffer[0], sizeof(blkaddr));
+#ifdef DEBUG
+ printf("got next block address=%d\n", blkaddr);
+#endif
+ }
+
+#ifdef DEBUG
+ printf("read %d blocks\n", linkedblocks_read);
+#endif
+ return 0;
+}
+
+
+/* Overall Algorithm:
+ * Parse options from user;
+ * Generate/pre-create the test file needed and close it;
+ * fork: child processes become the reader processes;
+ * while parent process continues as the writer process;
+ * both run till ending conditions are met.
+ */
+int
+main(int argc, char *argv[])
+{
+ /*pid_t childpid[READERS_MAX];
+ int child_ret_value[READERS_MAX];*/
+ pid_t childpid=0;
+ int child_ret_value;
+ pid_t mypid, tmppid;
+ int child_status;
+ int child_wait_option=0;
+ int ret_value = 0;
+
+ /* initialization */
+ if (setup_parameters(argc, argv) < 0){
+ Hgoto_error(1);
+ }
+
+ /* ==============================================================*/
+ /* UC_READWRITE: create datafile, launch both reader and writer. */
+ /* UC_WRITER: create datafile, skip reader, launch writer. */
+ /* UC_READER: skip create, launch reader, exit. */
+ /* ==============================================================*/
+ /* ============*/
+ /* Create file */
+ /* ============*/
+ if (launch_g != UC_READER){
+ printf("Creating skeleton data file for test...\n");
+ if (create_wo_file() < 0){
+ fprintf(stderr, "***encounter error\n");
+ Hgoto_error(1);
+ }else
+ printf("File created.\n");
+ }
+ /* flush output before possible fork */
+ HDfflush(stdout);
+
+ if (launch_g==UC_READWRITE){
+ /* fork process */
+ if((childpid = fork()) < 0) {
+ perror("fork");
+ Hgoto_error(1);
+ };
+ };
+ mypid = getpid();
+
+ /* ============= */
+ /* launch reader */
+ /* ============= */
+ if (launch_g != UC_WRITER){
+ /* the child process launches the reader */
+ if(0 == childpid) {
+ printf("%d: launch reader process\n", mypid);
+ if (read_wo_file() < 0){
+ fprintf(stderr, "read_wo_file encountered error\n");
+ exit(1);
+ }
+ /* Reader is done. Clean up by removing the data file */
+ HDremove(DATAFILE);
+ exit(0);
+ }
+ }
+
+ /* ============= */
+ /* launch writer */
+ /* ============= */
+ /* this process continues as the writer */
+#ifdef DEBUG
+ printf("%d: continue as the writer process\n", mypid);
+#endif
+ if (write_wo_file() < 0){
+ fprintf(stderr, "write_wo_file encountered error\n");
+ Hgoto_error(1);
+ }
+
+ /* ================================================ */
+ /* If readwrite, collect exit code of child process */
+ /* ================================================ */
+ if (launch_g == UC_READWRITE){
+ if ((tmppid = waitpid(childpid, &child_status, child_wait_option)) < 0){
+ perror("waitpid");
+ Hgoto_error(1);
+ }
+ if (WIFEXITED(child_status)){
+ if ((child_ret_value=WEXITSTATUS(child_status)) != 0){
+ printf("%d: child process exited with non-zero code (%d)\n",
+ mypid, child_ret_value);
+ Hgoto_error(2);
+ }
+ } else {
+ printf("%d: child process terminated abnormally\n", mypid);
+ Hgoto_error(2);
+ }
+ }
+
+done:
+ /* Print result and exit */
+ if (ret_value != 0){
+ printf("Error(s) encountered\n");
+ }else{
+ printf("All passed\n");
+ }
+
+ return(ret_value);
+}
+
+#else /* H5_HAVE_FORK */
+
+int
+main(void)
+{
+ HDfprintf(stderr, "Non-POSIX platform. Skipping.\n");
+ return EXIT_SUCCESS;
+} /* end main() */
+
+#endif /* H5_HAVE_FORK */
+
diff --git a/test/use.h b/test/use.h
new file mode 100644
index 0000000..dce79a0
--- /dev/null
+++ b/test/use.h
@@ -0,0 +1,64 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Use Case Header file: common definitions for use cases tests.
+ */
+#include "h5test.h"
+
+/* Macro definitions */
+#define Hgoto_error(val) {ret_value=val; goto done;}
+#define Hgoto_done {goto done;}
+#define Chunksize_DFT 256 /* chunksize default */
+#define ErrorReportMax 10 /* max number of errors reported */
+/* these two definitions must match each other */
+#define UC_DATATYPE H5T_NATIVE_SHORT /* use case HDF5 data type */
+#define UC_CTYPE short /* use case C data type */
+#define UC_RANK 3 /* use case dataset rank */
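+
+/* Usage sketch for the Hgoto_error/Hgoto_done macros (an illustration only;
+ * they assume the enclosing function declares an int ret_value and provides
+ * a done: label, and do_work below is a hypothetical helper):
+ *
+ *     int ret_value = 0;
+ *     if (do_work() < 0)
+ *         Hgoto_error(-1);    // sets ret_value = -1 and jumps to done
+ * done:
+ *     return ret_value;
+ */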
+
+/* Name of message file that is sent by the writer */
+#define WRITER_MESSAGE "USE_WRITER_MESSAGE"
+
+/* type declarations */
+typedef enum part_t {
+ UC_READWRITE =0, /* both writer and reader */
+ UC_WRITER, /* writer only */
+ UC_READER /* reader only */
+} part_t;
+typedef struct options_t {
+ int chunksize; /* chunks are chunksize^2 planes */
+ int chunkplanes; /* number of planes per chunk, default 1 */
+ hsize_t chunkdims[UC_RANK]; /* chunk dims is (chunkplan, chunksize, chunksize) */
+ hsize_t dims[UC_RANK]; /* dataset initial dims */
+ hsize_t max_dims[UC_RANK]; /* dataset max dims */
+ hsize_t nplanes; /* number of planes to write, default proportional to chunksize */
+ char *filename; /* use case data filename */
+ part_t launch; /* launch writer, reader or both */
+ int use_swmr; /* use swmr open (1) or not */
+ int iterations; /* iterations, default 1 */
+} options_t;
+
+/* global variables declarations */
+extern options_t UC_opts; /* Use Case Options */
+extern const char *progname_g; /* Program name */
+
+/* prototype declarations */
+int parse_option(int argc, char * const argv[]);
+int setup_parameters(int argc, char * const argv[]);
+void show_parameters(void);
+void usage(const char *prog);
+int create_uc_file(void);
+int write_uc_file(hbool_t tosend);
+int read_uc_file(hbool_t towait);
+
diff --git a/test/use_append_chunk.c b/test/use_append_chunk.c
new file mode 100644
index 0000000..1dd76c9
--- /dev/null
+++ b/test/use_append_chunk.c
@@ -0,0 +1,233 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Use Case 1.7 Appending a single chunk
+ * Description:
+ * Appending a single chunk of raw data to a dataset along an unlimited
+ * dimension within a pre-created file and reading the new data back.
+ * Goal:
+ * Read data appended by the Writer to a pre-existing dataset in a
+ * file. The dataset has one or more unlimited dimensions. The data is
+ * appended by a hyperslab that is contained in one chunk (for example,
+ * appending 2-dim planes along the slowest changing dimension in the
+ * 3-dim dataset).
+ * Level:
+ * User Level
+ * Guarantees:
+ * o Readers will see the modified dimension sizes after the Writer
+ * finishes HDF5 metadata updates and issues H5Fflush or H5Oflush calls.
+ * o Readers will see newly appended data after the Writer finishes
+ * the flush operation.
+ *
+ * Preconditions:
+ * o Readers are not allowed to modify the file.
+ * o All datasets that are modified by the Writer exist when the Writer
+ * opens the file.
+ * o All datasets that are modified by the Writer exist when a Reader
+ * opens the file.
+ * o Data is written by a hyperslab contained in one chunk.
+ *
+ * Main Success Scenario:
+ * 1. An application creates a file with required objects (groups,
+ * datasets, and attributes).
+ * 2. The Writer application opens the file and datasets in the file
+ * and starts adding data along the unlimited dimension using a hyperslab
+ * selection that corresponds to an HDF5 chunk.
+ * 3. A Reader opens the file and a dataset in a file, and queries
+ * the sizes of the dataset; if the extent of the dataset has changed,
+ * reads the appended data back.
+ *
+ * Discussion points:
+ * 1. Since the new data is written to the file, and metadata update
+ * operation of adding pointer to the newly written chunk is atomic and
+ * happens after the chunk is on the disk, only two things may happen
+ * to the Reader:
+ * o The Reader will not see new data.
+ * o The Reader will see all new data written by Writer.
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
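+
+/* Illustrative sketch of the append pattern this use case exercises (a
+ * minimal outline, not the test code itself; dset, filespace, memspace,
+ * start, count and buf stand for the handles and buffers set up by the
+ * test in use_common.c):
+ *
+ *     dims[0] += 1;                                  // grow the unlimited dimension by one plane
+ *     H5Dset_extent(dset, dims);
+ *     filespace = H5Dget_space(dset);
+ *     H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, NULL, count, NULL);
+ *     H5Dwrite(dset, UC_DATATYPE, memspace, filespace, H5P_DEFAULT, buf);
+ *     H5Dflush(dset);                                // make the new plane visible to readers
+ */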
+
+/* Created: Albert Cheng, 2013/5/28 */
+
+#include "h5test.h"
+
+/* This test uses many POSIX things that are not available on
+ * Windows. We're using a check for fork(2) here as a proxy for
+ * all POSIX/Unix/Linux things until this test can be made
+ * more platform-independent.
+ */
+#ifdef H5_HAVE_FORK
+
+#include "use.h"
+
+/* Global Variable definitions */
+options_t UC_opts; /* Use Case Options */
+const char *progname_g="use_append_chunk"; /* program name */
+
+/* Setup parameters for the use case.
+ * Return: 0 succeed; -1 fail.
+ */
+int setup_parameters(int argc, char * const argv[])
+{
+ /* use case defaults */
+ HDmemset(&UC_opts, 0, sizeof(options_t));
+ UC_opts.chunksize = Chunksize_DFT;
+ UC_opts.use_swmr = 1; /* use swmr open */
+ UC_opts.iterations = 1;
+ UC_opts.chunkplanes = 1;
+
+ /* parse options */
+ if (parse_option(argc, argv) < 0){
+ return(-1);
+ }
+ /* set chunk dims */
+ UC_opts.chunkdims[0] = UC_opts.chunkplanes;
+ UC_opts.chunkdims[1]=UC_opts.chunkdims[2]=UC_opts.chunksize;
+
+ /* set dataset initial and max dims */
+ UC_opts.dims[0] = 0;
+ UC_opts.max_dims[0] = H5S_UNLIMITED;
+ UC_opts.dims[1] = UC_opts.dims[2] = UC_opts.max_dims[1]=UC_opts.max_dims[2]=UC_opts.chunksize;
+
+ /* set nplanes */
+ if (UC_opts.nplanes == 0)
+ UC_opts.nplanes = UC_opts.chunksize;
+
+ /* show parameters and return */
+ show_parameters();
+ return(0);
+}
+
+
+/* Overall Algorithm:
+ * Parse options from user;
+ * Generate/pre-create the test file needed and close it;
+ * fork: child process becomes the reader process;
+ * while parent process continues as the writer process;
+ * both run till ending conditions are met.
+ */
+int
+main(int argc, char *argv[])
+{
+ pid_t childpid=0;
+ pid_t mypid, tmppid;
+ int child_status;
+ int child_wait_option=0;
+ int ret_value = 0;
+ int child_ret_value;
+ hbool_t send_wait = 0;
+
+ /* initialization */
+ if (setup_parameters(argc, argv) < 0){
+ Hgoto_error(1);
+ }
+
+ /* Determine whether to send/wait for the message file */
+ if(UC_opts.launch == UC_READWRITE) {
+ HDunlink(WRITER_MESSAGE);
+ send_wait = 1;
+ }
+
+ /* ==============================================================*/
+ /* UC_READWRITE: create datafile, launch both reader and writer. */
+ /* UC_WRITER: create datafile, skip reader, launch writer. */
+ /* UC_READER: skip create, launch reader, exit. */
+ /* ==============================================================*/
+ /* ============*/
+ /* Create file */
+ /* ============*/
+ if (UC_opts.launch != UC_READER){
+ printf("Creating skeleton data file for test...\n");
+ if (create_uc_file() < 0){
+ fprintf(stderr, "***encounter error\n");
+ Hgoto_error(1);
+ }else
+ printf("File created.\n");
+ }
+
+ if (UC_opts.launch==UC_READWRITE){
+ /* fork process */
+ if((childpid = fork()) < 0) {
+ perror("fork");
+ Hgoto_error(1);
+ };
+ };
+ mypid = getpid();
+
+ /* ============= */
+ /* launch reader */
+ /* ============= */
+ if (UC_opts.launch != UC_WRITER){
+ /* the child process launches the reader */
+ if(0 == childpid) {
+ printf("%d: launch reader process\n", mypid);
+ if (read_uc_file(send_wait) < 0){
+ fprintf(stderr, "read_uc_file encountered error\n");
+ exit(1);
+ }
+ exit(0);
+ }
+ }
+
+ /* ============= */
+ /* launch writer */
+ /* ============= */
+ /* this process continues as the writer */
+ printf("%d: continue as the writer process\n", mypid);
+ if (write_uc_file(send_wait) < 0){
+ fprintf(stderr, "write_uc_file encountered error\n");
+ Hgoto_error(1);
+ }
+
+ /* ================================================ */
+ /* If readwrite, collect exit code of child process */
+ /* ================================================ */
+ if (UC_opts.launch == UC_READWRITE){
+ if ((tmppid = waitpid(childpid, &child_status, child_wait_option)) < 0){
+ perror("waitpid");
+ Hgoto_error(1);
+ }
+ if (WIFEXITED(child_status)){
+ if ((child_ret_value=WEXITSTATUS(child_status)) != 0){
+ printf("%d: child process exited with non-zero code (%d)\n",
+ mypid, child_ret_value);
+ Hgoto_error(2);
+ }
+ } else {
+ printf("%d: child process terminated abnormally\n", mypid);
+ Hgoto_error(2);
+ }
+ }
+
+done:
+ /* Print result and exit */
+ if (ret_value != 0){
+ printf("Error(s) encountered\n");
+ }else{
+ printf("All passed\n");
+ }
+
+ return(ret_value);
+}
+
+#else /* H5_HAVE_FORK */
+
+int
+main(void)
+{
+ HDfprintf(stderr, "Non-POSIX platform. Skipping.\n");
+ return EXIT_SUCCESS;
+} /* end main() */
+
+#endif /* H5_HAVE_FORK */
+
diff --git a/test/use_append_mchunks.c b/test/use_append_mchunks.c
new file mode 100644
index 0000000..7728225
--- /dev/null
+++ b/test/use_append_mchunks.c
@@ -0,0 +1,226 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Use Case 1.8 Appending a hyperslab of multiple chunks.
+ * Description:
+ * Appending a hyperslab that spans several chunks of a dataset with
+ * unlimited dimensions within a pre-created file and reading the new
+ * data back.
+ * Goal:
+ * Read data appended by the Writer to a pre-existing dataset in a
+ * file. The dataset has one or more unlimited dimensions. The data
+ * is appended by a hyperslab that is contained in several chunks (for
+ * example, appending 2-dim planes along the slowest changing dimension
+ * in the 3-dim dataset and each plane is covered by 4 chunks).
+ * Level:
+ * User Level
+ * Guarantees:
+ * o Readers will see the modified dimension sizes after the Writer
+ * finishes HDF5 metadata updates and issues H5Fflush or H5Oflush calls.
+ * o Readers will see newly appended data after the Writer finishes
+ * the flush operation.
+ *
+ * Preconditions:
+ * o Readers are not allowed to modify the file.
+ * o All datasets that are modified by the Writer exist when the
+ * Writer opens the file.
+ * o All datasets that are modified by the Writer exist when a Reader
+ * opens the file.
+ *
+ * Main Success Scenario:
+ * 1. An application creates a file with required objects (groups,
+ * datasets, and attributes).
+ * 2. The Writer opens the file and datasets in the file and starts
+ * adding data using H5Dwrite call with a hyperslab selection that
+ * spans several chunks.
+ * 3. A Reader opens the file and a dataset in a file; if the size of
+ * the unlimited dimension has changed, reads the appended data back.
+ *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
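+
+/* Illustrative note (assuming the defaults set up below, i.e. one plane per
+ * chunk): each appended plane is (1 x 2*chunksize x 2*chunksize) while a
+ * chunk is (1 x chunksize x chunksize), so a single H5Dwrite of one plane
+ * touches four chunks:
+ *
+ *     plane:   +-----------+-----------+
+ *              |  chunk 0  |  chunk 1  |
+ *              +-----------+-----------+
+ *              |  chunk 2  |  chunk 3  |
+ *              +-----------+-----------+
+ */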
+
+/* Created: Albert Cheng, 2013/6/1 */
+
+#include "h5test.h"
+
+/* This test uses many POSIX things that are not available on
+ * Windows. We're using a check for fork(2) here as a proxy for
+ * all POSIX/Unix/Linux things until this test can be made
+ * more platform-independent.
+ */
+#ifdef H5_HAVE_FORK
+
+#include "use.h"
+
+/* Global Variable definitions */
+options_t UC_opts; /* Use Case Options */
+const char *progname_g="use_append_mchunks"; /* program name */
+
+/* Setup parameters for the use case.
+ * Return: 0 succeed; -1 fail.
+ */
+int setup_parameters(int argc, char * const argv[])
+{
+ /* use case defaults */
+ HDmemset(&UC_opts, 0, sizeof(options_t));
+ UC_opts.chunksize = Chunksize_DFT;
+ UC_opts.use_swmr = 1; /* use swmr open */
+ UC_opts.iterations = 1;
+ UC_opts.chunkplanes = 1;
+
+ /* parse options */
+ if (parse_option(argc, argv) < 0){
+ return(-1);
+ }
+ /* set chunk dims */
+ UC_opts.chunkdims[0] = UC_opts.chunkplanes;
+ UC_opts.chunkdims[1]=UC_opts.chunkdims[2]=UC_opts.chunksize;
+
+ /* set dataset initial and max dims */
+ UC_opts.dims[0] = 0;
+ UC_opts.max_dims[0] = H5S_UNLIMITED;
+ UC_opts.dims[1] = UC_opts.dims[2] = UC_opts.max_dims[1]=UC_opts.max_dims[2]=2*UC_opts.chunksize;
+
+ /* set nplanes */
+ if (UC_opts.nplanes == 0)
+ UC_opts.nplanes = 2*UC_opts.chunksize;
+
+ /* show parameters and return */
+ show_parameters();
+ return(0);
+}
+
+
+/* Overall Algorithm:
+ * Parse options from user;
+ * Generate/pre-create the test file needed and close it;
+ * fork: child process becomes the reader process;
+ * while parent process continues as the writer process;
+ * both run till ending conditions are met.
+ */
+int
+main(int argc, char *argv[])
+{
+ pid_t childpid=0;
+ pid_t mypid, tmppid;
+ int child_status;
+ int child_wait_option=0;
+ int ret_value = 0;
+ int child_ret_value;
+ hbool_t send_wait = 0;
+
+ /* initialization */
+ if (setup_parameters(argc, argv) < 0){
+ Hgoto_error(1);
+ }
+
+ /* Determine whether to send/wait for the message file */
+ if(UC_opts.launch == UC_READWRITE) {
+ HDunlink(WRITER_MESSAGE);
+ send_wait = 1;
+ }
+
+ /* ==============================================================*/
+ /* UC_READWRITE: create datafile, launch both reader and writer. */
+ /* UC_WRITER: create datafile, skip reader, launch writer. */
+ /* UC_READER: skip create, launch reader, exit. */
+ /* ==============================================================*/
+ /* ============*/
+ /* Create file */
+ /* ============*/
+ if (UC_opts.launch != UC_READER){
+ printf("Creating skeleton data file for test...\n");
+ if (create_uc_file() < 0){
+ fprintf(stderr, "***encounter error\n");
+ Hgoto_error(1);
+ }else
+ printf("File created.\n");
+ }
+
+ if (UC_opts.launch==UC_READWRITE){
+ /* fork process */
+ if((childpid = fork()) < 0) {
+ perror("fork");
+ Hgoto_error(1);
+ };
+ };
+ mypid = getpid();
+
+ /* ============= */
+ /* launch reader */
+ /* ============= */
+ if (UC_opts.launch != UC_WRITER){
+ /* the child process launches the reader */
+ if(0 == childpid) {
+ printf("%d: launch reader process\n", mypid);
+ if (read_uc_file(send_wait) < 0){
+ fprintf(stderr, "read_uc_file encountered error\n");
+ exit(1);
+ }
+ exit(0);
+ }
+ }
+
+ /* ============= */
+ /* launch writer */
+ /* ============= */
+ /* this process continues as the writer */
+ printf("%d: continue as the writer process\n", mypid);
+ if (write_uc_file(send_wait) < 0){
+ fprintf(stderr, "write_uc_file encountered error\n");
+ Hgoto_error(1);
+ }
+
+ /* ================================================ */
+ /* If readwrite, collect exit code of child process */
+ /* ================================================ */
+ if (UC_opts.launch == UC_READWRITE){
+ if ((tmppid = waitpid(childpid, &child_status, child_wait_option)) < 0){
+ perror("waitpid");
+ Hgoto_error(1);
+ }
+ if (WIFEXITED(child_status)){
+ if ((child_ret_value=WEXITSTATUS(child_status)) != 0){
+ printf("%d: child process exited with non-zero code (%d)\n",
+ mypid, child_ret_value);
+ Hgoto_error(2);
+ }
+ } else {
+ printf("%d: child process terminated abnormally\n", mypid);
+ Hgoto_error(2);
+ }
+ }
+
+done:
+ /* Print result and exit */
+ if (ret_value != 0){
+ printf("Error(s) encountered\n");
+ }else{
+ printf("All passed\n");
+ }
+
+ return(ret_value);
+}
+
+#else /* H5_HAVE_FORK */
+
+int
+main(void)
+{
+ HDfprintf(stderr, "Non-POSIX platform. Skipping.\n");
+ return EXIT_SUCCESS;
+} /* end main() */
+
+#endif /* H5_HAVE_FORK */
+
diff --git a/test/use_common.c b/test/use_common.c
new file mode 100644
index 0000000..99c0bd1
--- /dev/null
+++ b/test/use_common.c
@@ -0,0 +1,653 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "h5test.h"
+
+/* This test uses many POSIX things that are not available on
+ * Windows. We're using a check for fork(2) here as a proxy for
+ * all POSIX/Unix/Linux things until this test can be made
+ * more platform-independent.
+ */
+#ifdef H5_HAVE_FORK
+
+#include "use.h"
+
+#define H5D_FRIEND /*suppress error about including H5Dpkg */
+#define H5D_TESTING
+#include "H5Dpkg.h"
+
+void
+usage(const char *prog)
+{
+ HDfprintf(stderr, "usage: %s [OPTIONS]\n", prog);
+ HDfprintf(stderr, " OPTIONS\n");
+ HDfprintf(stderr, " -h, --help Print a usage message and exit\n");
+ HDfprintf(stderr, " -f FN Test file name [default: %s.h5]\n", prog);
+ HDfprintf(stderr, " -i N, --iteration=N Number of iterations to repeat the whole thing. [default: 1]\n");
+ HDfprintf(stderr, " -l w|r launch writer or reader only. [default: launch both]\n");
+ HDfprintf(stderr, " -n N, --nplanes=N Number of planes to write/read. [default: 1000]\n");
+ HDfprintf(stderr, " -s N, --swmr=N Use SWMR mode (0: no, non-0: yes) default is yes\n");
+ HDfprintf(stderr, " -z N, --chunksize=N Chunk size [default: %d]\n", Chunksize_DFT);
+ HDfprintf(stderr, " -y N, --chunkplanes=N Number of planes per chunk [default: 1]\n");
+ HDfprintf(stderr, "\n");
+} /* end usage() */
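+
+/* Example invocations (for illustration; shown for use_append_chunk, the
+ * same options apply to use_append_mchunks):
+ *   use_append_chunk -z 64 -n 64          write/read 64 planes with 64x64 chunks
+ *   use_append_chunk -l w -f data.h5      run only the writer against data.h5
+ *   use_append_chunk -l r -f data.h5      run only the reader against data.h5
+ */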
+
+/* Setup Use Case parameters by parsing command line options.
+ * Setup default values if not set by options. */
+int
+parse_option(int argc, char * const argv[])
+{
+ int ret_value=0;
+ int c;
+ /* command line options: See function usage for a description */
+ const char *nagg_options = "f:hi:l:n:s:y:z:";
+
+ /* suppress getopt from printing error */
+ opterr = 0;
+
+ while (1){
+ c = getopt (argc, argv, nagg_options);
+ if (-1 == c)
+ break;
+ switch (c) {
+ case 'h':
+ usage(progname_g);
+ exit(0);
+ break;
+ case 'f': /* usecase data file name */
+ UC_opts.filename = optarg;
+ break;
+ case 'i': /* iterations */
+ if ((UC_opts.iterations = atoi(optarg)) <= 0){
+ fprintf(stderr, "bad iterations number %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 'l': /* launch reader or writer only */
+ switch (*optarg) {
+ case 'r': /* reader only */
+ UC_opts.launch = UC_READER;
+ break;
+ case 'w': /* writer only */
+ UC_opts.launch = UC_WRITER;
+ break;
+ default:
+ fprintf(stderr, "launch value(%c) should be w or r only.\n", *optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ break;
+ }
+ break;
+ case 'n': /* number of planes to write/read */
+ if ((UC_opts.nplanes = atoi(optarg)) <= 0){
+ fprintf(stderr, "bad number of planes %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 's': /* use swmr file open mode */
+ if ((UC_opts.use_swmr = atoi(optarg)) < 0){
+ fprintf(stderr, "swmr value should be 0(no) or 1(yes)\n");
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 'y': /* Number of planes per chunk */
+ if ((UC_opts.chunkplanes = atoi(optarg)) <= 0){
+ fprintf(stderr, "bad number of planes per chunk %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 'z': /* size of chunk=(z,z) */
+ if ((UC_opts.chunksize = atoi(optarg)) <= 0){
+ fprintf(stderr, "bad chunksize %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case '?':
+ fprintf(stderr, "getopt returned '%c'.\n", c);
+ Hgoto_error(-1);
+ default:
+ fprintf(stderr, "getopt returned unexpected value.\n");
+ fprintf(stderr, "Unexpected value is %d\n", c);
+ Hgoto_error(-1);
+ }
+ }
+
+ /* set test file name if not given */
+ if (!UC_opts.filename){
+ /* default data file name is <progname>.h5 */
+ if ((UC_opts.filename=(char*)HDmalloc(HDstrlen(progname_g)+4))==NULL) {
+ fprintf(stderr, "malloc: failed\n");
+ Hgoto_error(-1);
+ };
+ HDstrcpy(UC_opts.filename, progname_g);
+ HDstrcat(UC_opts.filename, ".h5");
+ }
+
+done:
+ /* All done. */
+ return(ret_value);
+}
+
+/* Show parameters used for this use case */
+void show_parameters(void){
+ printf("===Parameters used:===\n");
+ printf("chunk dims=(%llu, %llu, %llu)\n", (unsigned long long)UC_opts.chunkdims[0],
+ (unsigned long long)UC_opts.chunkdims[1], (unsigned long long)UC_opts.chunkdims[2]);
+ printf("dataset max dims=(%llu, %llu, %llu)\n", (unsigned long long)UC_opts.max_dims[0],
+ (unsigned long long)UC_opts.max_dims[1], (unsigned long long)UC_opts.max_dims[2]);
+ printf("number of planes to write=%llu\n", (unsigned long long)UC_opts.nplanes);
+ printf("using SWMR mode=%s\n", UC_opts.use_swmr ? "yes(1)" : "no(0)");
+ printf("data filename=%s\n", UC_opts.filename);
+ printf("launch part=");
+ switch (UC_opts.launch){
+ case UC_READWRITE:
+ printf("Reader/Writer\n");
+ break;
+ case UC_WRITER:
+ printf("Writer\n");
+ break;
+ case UC_READER:
+ printf("Reader\n");
+ break;
+ default:
+ /* should not happen */
+ printf("Illegal part(%d)\n", UC_opts.launch);
+ };
+ printf("number of iterations=%d (not used yet)\n", UC_opts.iterations);
+ printf("===Parameters shown===\n");
+}
+
+/* Create the skeleton use case file for testing.
+ * It has one 3d dataset using chunked storage.
+ * The dataset is (unlimited, chunksize, chunksize).
+ * Dataset type is 2 bytes integer.
+ * It starts out "empty", i.e., first dimension is 0.
+ *
+ * Return: 0 succeed; -1 fail.
+ */
+int create_uc_file(void)
+{
+ hsize_t dims[3]; /* Dataset starting dimensions */
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t sid; /* Dataspace ID */
+ hid_t dsid; /* Dataset ID */
+ hid_t fapl; /* File access property list */
+ H5D_chunk_index_t idx_type; /* Chunk index type */
+
+ /* Create the file */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+ if((fid = H5Fcreate(UC_opts.filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ return -1;
+
+ /* Set up dimension sizes */
+ dims[0] = 0;
+ dims[1] = dims[2] = UC_opts.max_dims[1];
+
+ /* Create dataspace for creating datasets */
+ if((sid = H5Screate_simple(3, dims, UC_opts.max_dims)) < 0)
+ return -1;
+
+ /* Create dataset creation property list */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ return -1;
+ if(H5Pset_chunk(dcpl, 3, UC_opts.chunkdims) < 0)
+ return -1;
+
+ /* create dataset of progname */
+ if((dsid = H5Dcreate2(fid, progname_g, UC_DATATYPE, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ return -1;
+
+ /* Check that the chunk index type is not version 1 B-tree.
+ * Version 1 B-trees are not supported under SWMR.
+ */
+ if(H5D__layout_idx_type_test(dsid, &idx_type) < 0)
+ return -1;
+ if(idx_type == H5D_CHUNK_IDX_BTREE) {
+ fprintf(stderr, "ERROR: Chunk index is version 1 B-tree: aborting.\n");
+ return -1;
+ }
+
+ /* Close everything */
+ if(H5Dclose(dsid) < 0)
+ return -1;
+ if(H5Pclose(fapl) < 0)
+ return -1;
+ if(H5Pclose(dcpl) < 0)
+ return -1;
+ if(H5Sclose(sid) < 0)
+ return -1;
+ if(H5Fclose(fid) < 0)
+ return -1;
+
+ return 0;
+}
+
+/* Append planes to the dataset, each of size (1, dims[1], dims[2]), where
+ * dims[1] and dims[2] span one chunk (use_append_chunk) or two chunks in
+ * each direction, i.e. four chunks per plane (use_append_mchunks).
+ * Fill each plane with the plane number and then write it as the nth plane.
+ * Increase the plane number and repeat until nplanes planes have been written.
+ *
+ * Return: 0 succeed; -1 fail.
+ */
+int write_uc_file(hbool_t tosend)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t dsid; /* dataset ID */
+ hid_t fapl; /* File access property list */
+ hid_t dcpl; /* Dataset creation property list */
+ char *name;
+ UC_CTYPE *buffer, *bufptr; /* data buffer */
+ hsize_t cz=UC_opts.chunksize; /* Chunk size */
+ hid_t f_sid; /* dataset file space id */
+ hid_t m_sid; /* memory space id */
+ int rank; /* rank */
+ hsize_t chunk_dims[3]; /* Chunk dimensions */
+ hsize_t dims[3]; /* Dataspace dimensions */
+ hsize_t memdims[3]; /* Memory space dimensions */
+ hsize_t start[3] = {0,0,0}, count[3]; /* Hyperslab selection values */
+ hsize_t i, j, k;
+
+ name = UC_opts.filename;
+
+ /* Open the file */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+ if(UC_opts.use_swmr)
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+ if((fid = H5Fopen(name, H5F_ACC_RDWR | (UC_opts.use_swmr ? H5F_ACC_SWMR_WRITE : 0), fapl)) < 0){
+ fprintf(stderr, "H5Fopen failed\n");
+ return -1;
+ }
+
+ if(tosend)
+ /* Send a message that H5Fopen is complete--releasing the file lock */
+ h5_send_message(WRITER_MESSAGE, NULL, NULL);
+
+ /* Open the dataset of the program name */
+ if((dsid = H5Dopen2(fid, progname_g, H5P_DEFAULT)) < 0){
+ fprintf(stderr, "H5Dopen2 failed\n");
+ return -1;
+ }
+
+ /* Find chunksize used */
+ if ((dcpl = H5Dget_create_plist(dsid)) < 0){
+ fprintf(stderr, "H5Dget_create_plist failed\n");
+ return -1;
+ }
+ if (H5D_CHUNKED != H5Pget_layout(dcpl)){
+ fprintf(stderr, "storage layout is not chunked\n");
+ return -1;
+ }
+ if ((rank = H5Pget_chunk(dcpl, 3, chunk_dims)) != 3){
+ fprintf(stderr, "storage rank is not 3\n");
+ return -1;
+ }
+
+ /* verify chunk_dims against the set parameters */
+ if (chunk_dims[0]!=UC_opts.chunkdims[0] || chunk_dims[1] != cz || chunk_dims[2] != cz){
+ fprintf(stderr, "chunk size is not as expected. Got dims=(%llu,%llu,%llu)\n",
+ (unsigned long long)chunk_dims[0], (unsigned long long)chunk_dims[1],
+ (unsigned long long)chunk_dims[2]);
+ return -1;
+ }
+
+ /* allocate space for data buffer 1 X dims[1] X dims[2] of UC_CTYPE */
+ memdims[0]=1;
+ memdims[1] = UC_opts.dims[1];
+ memdims[2] = UC_opts.dims[2];
+ if ((buffer=(UC_CTYPE*)HDmalloc((size_t)memdims[1]*(size_t)memdims[2]*sizeof(UC_CTYPE)))==NULL) {
+ fprintf(stderr, "malloc: failed\n");
+ return -1;
+ };
+
+ /*
+ * Get dataset rank and dimension.
+ */
+ f_sid = H5Dget_space(dsid); /* Get filespace handle first. */
+ rank = H5Sget_simple_extent_ndims(f_sid);
+ if (rank != UC_RANK){
+ fprintf(stderr, "rank(%d) of dataset does not match\n", rank);
+ return -1;
+ }
+ if (H5Sget_simple_extent_dims(f_sid, dims, NULL) < 0){
+ fprintf(stderr, "H5Sget_simple_extent_dims got error\n");
+ return -1;
+ }
+ printf("dataset rank %d, dimensions %llu x %llu x %llu\n",
+ rank, (unsigned long long)(dims[0]), (unsigned long long)(dims[1]),
+ (unsigned long long)(dims[2]));
+ /* verify that file space dims are as expected and are consistent with memory space dims */
+ if (dims[0] != 0 || dims[1] != memdims[1] || dims[2] != memdims[2]){
+ fprintf(stderr, "dataset is not empty. Got dims=(%llu,%llu,%llu)\n",
+ (unsigned long long)dims[0], (unsigned long long)dims[1],
+ (unsigned long long)dims[2]);
+ return -1;
+ }
+
+ /* setup mem-space for buffer */
+ if ((m_sid=H5Screate_simple(rank, memdims, NULL))<0){
+ fprintf(stderr, "H5Screate_simple for memory failed\n");
+ return -1;
+ };
+
+ /* write planes */
+ count[0]=1;
+ count[1]=dims[1];
+ count[2]=dims[2];
+ for (i=0; i<UC_opts.nplanes; i++){
+ /* fill the buffer with the plane number i */
+ bufptr = buffer;
+ for (j=0; j<dims[1]; j++)
+ for (k=0; k<dims[2]; k++)
+ *bufptr++ = i;
+
+ /* Cork the dataset's metadata in the cache, if SWMR is enabled */
+ if(UC_opts.use_swmr)
+ if(H5Odisable_mdc_flushes(dsid) < 0) {
+ fprintf(stderr, "H5Odisable_mdc_flushes failed\n");
+ return -1;
+ }
+
+ /* extend the dataset by one for new plane */
+ dims[0]=i+1;
+ if(H5Dset_extent(dsid, dims) < 0){
+ fprintf(stderr, "H5Dset_extent failed\n");
+ return -1;
+ }
+
+ /* Get the dataset's dataspace */
+ if((f_sid = H5Dget_space(dsid)) < 0){
+ fprintf(stderr, "H5Dset_extent failed\n");
+ return -1;
+ }
+
+ start[0]=i;
+ /* Choose the next plane to write */
+ if(H5Sselect_hyperslab(f_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0){
+ fprintf(stderr, "Failed H5Sselect_hyperslab\n");
+ return -1;
+ }
+
+ /* Write plane to the dataset */
+ if(H5Dwrite(dsid, UC_DATATYPE, m_sid, f_sid, H5P_DEFAULT, buffer) < 0){
+ fprintf(stderr, "Failed H5Dwrite\n");
+ return -1;
+ }
+
+ /* Uncork the dataset's metadata from the cache, if SWMR is enabled */
+ if(UC_opts.use_swmr)
+ if(H5Oenable_mdc_flushes(dsid) < 0) {
+ fprintf(stderr, "H5Oenable_mdc_flushes failed\n");
+ return -1;
+ }
+
+ /* flush the dataset to make the just-written plane available. */
+ if(H5Dflush(dsid) < 0)
+ {
+ fprintf(stderr, "Failed to H5Fflush file\n");
+ return -1;
+ }
+ }
+
+ /* Done writing. Free/Close all resources including data file */
+ HDfree(buffer);
+ if (H5Dclose(dsid) < 0){
+ fprintf(stderr, "Failed to close datasete\n");
+ return -1;
+ }
+ if (H5Sclose(m_sid) < 0){
+ fprintf(stderr, "Failed to close memory space\n");
+ return -1;
+ }
+ if (H5Sclose(f_sid) < 0){
+ fprintf(stderr, "Failed to close file space\n");
+ return -1;
+ }
+ if (H5Pclose(fapl) < 0){
+ fprintf(stderr, "Failed to property list\n");
+ return -1;
+ }
+ if (H5Fclose(fid) < 0){
+ fprintf(stderr, "Failed to close file id\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+
+/* Read planes from the dataset.
+ * It expects the dataset to be growing while it reads.
+ * It checks the unlimited dimension (1st one). When it increases,
+ * it will read in the new planes, one by one, and verify the data correctness.
+ * (The nth plane should contain the value "n" throughout.)
+ * When the unlimited dimension has grown to nplanes (with the defaults the
+ * dataset becomes a cube), that is the expected end of the data and the reader exits.
+ *
+ * Return: 0 succeed; -1 fail.
+ */
+int read_uc_file(hbool_t towait)
+{
+ hid_t fapl; /* file access property list ID */
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t dsid; /* dataset ID */
+ char *name;
+ UC_CTYPE *buffer, *bufptr; /* read data buffer */
+ hid_t f_sid; /* dataset file space id */
+ hid_t m_sid; /* memory space id */
+ int rank; /* rank */
+ hsize_t dims[3]; /* Dataspace dimensions */
+ hsize_t memdims[3]; /* Memory space dimensions */
+ hsize_t nplane=0, nplane_old=0; /* nth plane, last nth plane */
+ hsize_t start[3] = {0,0,0}, count[3]; /* Hyperslab selection values */
+ hsize_t j, k;
+ int nreadererr=0;
+ int nerrs;
+ int nonewplane;
+
+ /* Before reading, wait for the message that H5Fopen is complete--file lock is released */
+ if(towait && h5_wait_message(WRITER_MESSAGE) < 0) {
+ fprintf(stderr, "Cannot find writer message file...failed\n");
+ return -1;
+ }
+
+ name = UC_opts.filename;
+
+ /* Open the file */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+ if((fid = H5Fopen(name, H5F_ACC_RDONLY | (UC_opts.use_swmr ? H5F_ACC_SWMR_READ : 0), fapl)) < 0){
+ fprintf(stderr, "H5Fopen failed\n");
+ return -1;
+ }
+ if (H5Pclose(fapl) < 0){
+ fprintf(stderr, "Failed to property list\n");
+ return -1;
+ }
+
+
+ /* Open the dataset of the program name */
+ if((dsid = H5Dopen2(fid, progname_g, H5P_DEFAULT)) < 0){
+ fprintf(stderr, "H5Dopen2 failed\n");
+ return -1;
+ }
+
+ /* allocate space for data buffer 1 X dims[1] X dims[2] of UC_CTYPE */
+ memdims[0]=1;
+ memdims[1] = UC_opts.dims[1];
+ memdims[2] = UC_opts.dims[2];
+ if ((buffer=(UC_CTYPE*)HDmalloc((size_t)memdims[1]*(size_t)memdims[2]*sizeof(UC_CTYPE)))==NULL) {
+ fprintf(stderr, "malloc: failed\n");
+ return -1;
+ };
+
+ /*
+ * Get dataset rank and dimension.
+ * Verify dimension is as expected (unlimited,2*chunksize,2*chunksize).
+ */
+ f_sid = H5Dget_space(dsid); /* Get filespace handle first. */
+ rank = H5Sget_simple_extent_ndims(f_sid);
+ if (rank != UC_RANK){
+ fprintf(stderr, "rank(%d) of dataset does not match\n", rank);
+ return -1;
+ }
+ if (H5Sget_simple_extent_dims(f_sid, dims, NULL) < 0){
+ fprintf(stderr, "H5Sget_simple_extent_dims got error\n");
+ return -1;
+ }
+ printf("dataset rank %d, dimensions %llu x %llu x %llu\n",
+ rank, (unsigned long long)(dims[0]), (unsigned long long)(dims[1]),
+ (unsigned long long)(dims[2]));
+ /* verify that file space dims are as expected and are consistent with memory space dims */
+ if (dims[1] != memdims[1] || dims[2] != memdims[2]){
+ fprintf(stderr, "dataset dimension is not as expected. Got dims=(%llu,%llu,%llu)\n",
+ (unsigned long long)dims[0], (unsigned long long)dims[1],
+ (unsigned long long)dims[2]);
+ fprintf(stderr, "But memdims=(%llu,%llu,%llu)\n",
+ (unsigned long long)memdims[0], (unsigned long long)memdims[1],
+ (unsigned long long)memdims[2]);
+ return -1;
+ }
+
+ /* setup mem-space for buffer */
+ if ((m_sid=H5Screate_simple(rank, memdims, NULL))<0){
+ fprintf(stderr, "H5Screate_simple for memory failed\n");
+ return -1;
+ };
+
+ /* Read 1 plane at a time whenever the dataset grows larger
+ * (along dim[0]) */
+ count[0]=1;
+ count[1]=dims[1];
+ count[2]=dims[2];
+ /* quit when all nplanes have been read */
+ nonewplane=0;
+ while (nplane_old < UC_opts.nplanes ){
+ /* print a progress message according to whether new planes are available */
+ if (nplane_old < dims[0]) {
+ if (nonewplane){
+ /* end the previous message */
+ printf("\n");
+ nonewplane=0;
+ }
+ printf("reading planes %llu to %llu\n", (unsigned long long)nplane_old,
+ (unsigned long long)dims[0]);
+ }else{
+ if (nonewplane){
+ printf(".");
+ if (nonewplane>=30){
+ fprintf(stderr, "waited too long for new plane, quit.\n");
+ return -1;
+ }
+ }else{
+ /* print the message only the first time; print dots while there is still no new plane */
+ printf("no new planes to read ");
+ }
+ nonewplane++;
+ /* pause for a second */
+ HDsleep(1);
+ }
+ for (nplane=nplane_old; nplane < dims[0]; nplane++){
+ /* read planes between last old nplanes and current extent */
+ /* Get the dataset's dataspace */
+ if((f_sid = H5Dget_space(dsid)) < 0){
+ fprintf(stderr, "H5Dget_space failed\n");
+ return -1;
+ }
+
+ start[0]=nplane;
+ /* Choose the next plane to read */
+ if(H5Sselect_hyperslab(f_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0){
+ fprintf(stderr, "H5Sselect_hyperslab failed\n");
+ return -1;
+ }
+
+ /* Read the plane from the dataset */
+ if(H5Dread(dsid, UC_DATATYPE, m_sid, f_sid, H5P_DEFAULT, buffer) < 0){
+ fprintf(stderr, "H5Dread failed\n");
+ return -1;
+ }
+
+ /* compare read data with expected data value which is nplane */
+ bufptr = buffer;
+ nerrs=0;
+ for (j=0; j<dims[1]; j++){
+ for (k=0; k<dims[2]; k++){
+ if ((hsize_t)*bufptr++ != nplane){
+ if (++nerrs < ErrorReportMax){
+ fprintf(stderr,
+ "found error %llu plane(%llu,%llu), expected %llu, got %d\n",
+ (unsigned long long)nplane, (unsigned long long)j,
+ (unsigned long long)k, (unsigned long long)nplane, (int)*(bufptr-1));
+ }
+ }
+ }
+ }
+ if (nerrs){
+ nreadererr++;
+ fprintf(stderr, "found %d unexpected values in plane %llu\n", nerrs,
+ (unsigned long long)nplane);
+ }
+ }
+ /* Have read all current planes */
+ nplane_old=dims[0];
+
+ /* check if dataset has grown since last time */
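+ /* Two ways to pick up the writer's new extent: close and reopen the
+ * dataset and file (the disabled #if 0 branch below) or, as done here,
+ * call H5Drefresh() so the open dataset handle sees the latest
+ * dimensions. */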
+#if 0
+ /* close dsid and file, then reopen them */
+ if (H5Dclose(dsid) < 0){
+ fprintf(stderr, "H5Dclose failed\n");
+ return -1;
+ }
+ if (H5Fclose(fid) < 0){
+ fprintf(stderr, "H5Fclose failed\n");
+ return -1;
+ }
+ if((fid = H5Fopen(name, H5F_ACC_RDONLY | (UC_opts.use_swmr ? H5F_ACC_SWMR_READ : 0), H5P_DEFAULT)) < 0){
+ fprintf(stderr, "H5Fopen failed\n");
+ return -1;
+ }
+ if((dsid = H5Dopen2(fid, progname_g, H5P_DEFAULT)) < 0){
+ fprintf(stderr, "H5Dopen2 failed\n");
+ return -1;
+ }
+#else
+ H5Drefresh(dsid);
+#endif
+ f_sid = H5Dget_space(dsid); /* Get filespace handle first. */
+ if (H5Sget_simple_extent_dims(f_sid, dims, NULL) < 0){
+ fprintf(stderr, "H5Sget_simple_extent_dims got error\n");
+ return -1;
+ }
+ }
+
+ if (nreadererr)
+ return -1;
+ else
+ return 0;
+}
+
+#endif /* H5_HAVE_FORK */
+
diff --git a/test/use_disable_mdc_flushes.c b/test/use_disable_mdc_flushes.c
new file mode 100644
index 0000000..2915cc3
--- /dev/null
+++ b/test/use_disable_mdc_flushes.c
@@ -0,0 +1,549 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * This is copied from use_append_chunk.c with modifications to show
+ * the usage of H5Odisable_mdc_flushes/H5Oenable_mdc_flushes/H5Oare_mdc_flushes_disabled public routines.
+ */
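+
+/* Illustrative sketch of the cork/uncork calls demonstrated below (a minimal
+ * outline only; dset stands for an open dataset handle):
+ *
+ *     hbool_t disabled;
+ *     H5Odisable_mdc_flushes(dset);                  // cork: keep the dataset's metadata in the cache
+ *     H5Oare_mdc_flushes_disabled(dset, &disabled);  // query the corked state
+ *     ... extend/write/H5Dflush loop ...
+ *     H5Oenable_mdc_flushes(dset);                   // uncork (also happens automatically at H5Dclose)
+ */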
+
+#include "h5test.h"
+
+/* This test uses many POSIX things that are not available on
+ * Windows. We're using a check for fork(2) here as a proxy for
+ * all POSIX/Unix/Linux things until this test can be made
+ * more platform-independent.
+ */
+#ifdef H5_HAVE_FORK
+
+#define H5D_FRIEND /*suppress error about including H5Dpkg */
+#define H5D_TESTING
+#include "H5Dpkg.h"
+
+/* Global Variable definitions */
+const char *progname_g="use_disable_mdc_flushes"; /* program name */
+
+/* these two definitions must match each other */
+#define UC_DATATYPE H5T_NATIVE_SHORT /* use case HDF5 data type */
+#define UC_CTYPE short /* use case C data type */
+#define UC_RANK 3 /* use case dataset rank */
+#define Chunksize_DFT 256 /* chunksize default */
+#define Hgoto_error(val) {ret_value=val; goto done;}
+
+
+char *filename_g;
+hsize_t nplanes_g;
+int use_swmr_g;
+int chunkplanes_g;
+int chunksize_g;
+hsize_t dims_g[UC_RANK];
+hsize_t max_dims_g[UC_RANK];
+hsize_t chunkdims_g[UC_RANK];
+
+static void usage(const char *prog);
+static int parse_option(int argc, char * const argv[]);
+static void show_parameters(void);
+static int create_file(void);
+static int setup_parameters(int argc, char * const argv[]);
+
+/*
+ * Note: Long options are not yet implemented.
+ *
+ * usage: use_disable_mdc_flushes [OPTIONS]
+ * OPTIONS
+ * -h, --help Print a usage message and exit
+ * -f FN Test file name [default: use_disable_mdc_flushes.h5]
+ * -n N, --nplanes=N Number of planes to write. [default: the chunk size]
+ * -s N, --swmr=N Use SWMR mode (0: no, non-0: yes) default is yes
+ * -z N, --chunksize=N Chunk size [default: 256]
+ * -y N, --chunkplanes=N Number of planes per chunk [default: 1]
+ */
+static void
+usage(const char *prog)
+{
+ fprintf(stderr, "usage: %s [OPTIONS]\n", prog);
+ fprintf(stderr, " OPTIONS\n");
+ fprintf(stderr, " -h Print a usage message and exit\n");
+ fprintf(stderr, " -f FN Test file name [default: %s.h5]\n", prog);
+ fprintf(stderr, " -n N Number of planes to write. [default: 1000]\n");
+ fprintf(stderr, " -s N Use SWMR mode (0: no, non-0: yes) default is yes\n");
+ fprintf(stderr, " -z N Chunk size [default: %d]\n", Chunksize_DFT);
+ fprintf(stderr, " -y N Number of planes per chunk [default: 1]\n");
+ fprintf(stderr, "\n");
+} /* usage() */
+
+
+/*
+ * Setup Use Case parameters by parsing command line options.
+ * Setup default values if not set by options. */
+static int
+parse_option(int argc, char * const argv[])
+{
+ int ret_value=0;
+ int c;
+ /* command line options: See function usage for a description */
+ const char *cmd_options = "f:hn:s:y:z:";
+
+ /* suppress getopt from printing error */
+ opterr = 0;
+
+ while (1){
+ c = getopt (argc, argv, cmd_options);
+ if (-1 == c)
+ break;
+ switch (c) {
+ case 'h':
+ usage(progname_g);
+ exit(0);
+ break;
+ case 'f': /* usecase data file name */
+ filename_g = optarg;
+ break;
+ case 'n': /* number of planes to write/read */
+ if ((nplanes_g = atoi(optarg)) <= 0){
+ fprintf(stderr, "bad number of planes %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 's': /* use swmr file open mode */
+ if ((use_swmr_g = atoi(optarg)) < 0){
+ fprintf(stderr, "swmr value should be 0(no) or 1(yes)\n");
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 'y': /* Number of planes per chunk */
+ if ((chunkplanes_g = atoi(optarg)) <= 0){
+ fprintf(stderr, "bad number of planes per chunk %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 'z': /* size of chunk=(z,z) */
+ if ((chunksize_g = atoi(optarg)) <= 0){
+ fprintf(stderr, "bad chunksize %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case '?':
+ fprintf(stderr, "getopt returned '%c'.\n", c);
+ Hgoto_error(-1);
+ default:
+ fprintf(stderr, "getopt returned unexpected value.\n");
+ fprintf(stderr, "Unexpected value is %d\n", c);
+ Hgoto_error(-1);
+ }
+ }
+
+ /* set test file name if not given */
+ if (!filename_g){
+ /* default data file name is <progname>.h5 */
+ if ((filename_g = (char*)HDmalloc(HDstrlen(progname_g)+4))==NULL) {
+ fprintf(stderr, "malloc: failed\n");
+ Hgoto_error(-1);
+ };
+ HDstrcpy(filename_g, progname_g);
+ HDstrcat(filename_g, ".h5");
+ }
+
+done:
+ /* All done. */
+ return(ret_value);
+} /* parse_option() */
+
+/* Show parameters used for this use case */
+static void
+show_parameters(void)
+{
+ printf("===Parameters used:===\n");
+ printf("chunk dims=(%llu, %llu, %llu)\n", (unsigned long long)chunkdims_g[0],
+ (unsigned long long)chunkdims_g[1], (unsigned long long)chunkdims_g[2]);
+ printf("dataset max dims=(%llu, %llu, %llu)\n", (unsigned long long)max_dims_g[0],
+ (unsigned long long)max_dims_g[1], (unsigned long long)max_dims_g[2]);
+ printf("number of planes to write=%llu\n", (unsigned long long)nplanes_g);
+ printf("using SWMR mode=%s\n", use_swmr_g ? "yes(1)" : "no(0)");
+ printf("data filename=%s\n", filename_g);
+ printf("===Parameters shown===\n");
+} /* show_parameters() */
+
+/*
+ * Setup parameters for the use case.
+ * Return: 0 succeed; -1 fail.
+ */
+static int
+setup_parameters(int argc, char * const argv[])
+{
+ /* use case defaults */
+ chunksize_g = Chunksize_DFT;
+ use_swmr_g = 1; /* use swmr open */
+ chunkplanes_g = 1;
+
+ /* parse options */
+ if (parse_option(argc, argv) < 0){
+ return(-1);
+ }
+ /* set chunk dims */
+ chunkdims_g[0] = chunkplanes_g;
+ chunkdims_g[1]= chunkdims_g[2] = chunksize_g;
+
+ /* set dataset initial and max dims */
+ dims_g[0] = 0;
+ max_dims_g[0] = H5S_UNLIMITED;
+ dims_g[1] = dims_g[2] = max_dims_g[1] = max_dims_g[2] = chunksize_g;
+
+ /* set nplanes */
+ if (nplanes_g == 0)
+ nplanes_g = chunksize_g;
+
+ /* show parameters and return */
+ show_parameters();
+ return(0);
+} /* setup_parameters() */
+
+/*
+ * Create the skeleton use case file for testing.
+ * It has one 3d dataset using chunked storage.
+ * The dataset is (unlimited, chunksize, chunksize).
+ * Dataset type is 2 bytes integer.
+ * It starts out "empty", i.e., first dimension is 0.
+ *
+ * Return: 0 succeed; -1 fail.
+ */
+static int
+create_file(void)
+{
+ hsize_t dims[3]; /* Dataset starting dimensions */
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t sid; /* Dataspace ID */
+ hid_t dsid; /* Dataset ID */
+ hid_t fapl; /* File access property list */
+ H5D_chunk_index_t idx_type; /* Chunk index type */
+
+ /* Create the file */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ return -1;
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+ if((fid = H5Fcreate(filename_g, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ return -1;
+
+ /* Set up dimension sizes */
+ dims[0] = 0;
+ dims[1] = dims[2] = max_dims_g[1];
+
+ /* Create dataspace for creating datasets */
+ if((sid = H5Screate_simple(3, dims, max_dims_g)) < 0)
+ return -1;
+
+ /* Create dataset creation property list */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ return -1;
+ if(H5Pset_chunk(dcpl, 3, chunkdims_g) < 0)
+ return -1;
+
+ /* create dataset of progname */
+ if((dsid = H5Dcreate2(fid, progname_g, UC_DATATYPE, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ return -1;
+
+ /* Check that the chunk index type is not version 1 B-tree.
+ * Version 1 B-trees are not supported under SWMR.
+ */
+ if(H5D__layout_idx_type_test(dsid, &idx_type) < 0)
+ return -1;
+ if(idx_type == H5D_CHUNK_IDX_BTREE) {
+ fprintf(stderr, "ERROR: Chunk index is version 1 B-tree: aborting.\n");
+ return -1;
+ }
+
+ /* Close everything */
+ if(H5Dclose(dsid) < 0)
+ return -1;
+ if(H5Pclose(fapl) < 0)
+ return -1;
+ if(H5Pclose(dcpl) < 0)
+ return -1;
+ if(H5Sclose(sid) < 0)
+ return -1;
+ if(H5Fclose(fid) < 0)
+ return -1;
+
+ return 0;
+} /* create_file() */
+
+/*
+ * Append planes, each of (1, chunksize, chunksize), to the dataset; with the
+ * default of one plane per chunk, one chunk is appended at a time.
+ * Fill each plane with the plane number and then write it as the nth plane.
+ * Increase the plane number and repeat until nplanes planes have been
+ * written; with the defaults the end product is a chunksize^3 cube.
+ *
+ * Return: 0 succeed; -1 fail.
+ */
+static int
+write_file(void)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t dsid; /* dataset ID */
+ hid_t fapl; /* File access property list */
+ hid_t dcpl; /* Dataset creation property list */
+ char *name;
+ UC_CTYPE *buffer, *bufptr; /* data buffer */
+ hsize_t cz=chunksize_g; /* Chunk size */
+ hid_t f_sid; /* dataset file space id */
+ hid_t m_sid; /* memory space id */
+ int rank; /* rank */
+ hsize_t chunk_dims[3]; /* Chunk dimensions */
+ hsize_t dims[3]; /* Dataspace dimensions */
+ hsize_t memdims[3]; /* Memory space dimensions */
+ hsize_t start[3] = {0,0,0}, count[3]; /* Hyperslab selection values */
+ hbool_t disabled; /* Object's disabled status */
+ hsize_t i, j, k;
+
+ name = filename_g;
+
+ /* Open the file */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ return -1;
+ if(use_swmr_g)
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+ if((fid = H5Fopen(name, H5F_ACC_RDWR | (use_swmr_g ? H5F_ACC_SWMR_WRITE : 0), fapl)) < 0){
+ fprintf(stderr, "H5Fopen failed\n");
+ return -1;
+ }
+
+ /* Open the dataset of the program name */
+ if((dsid = H5Dopen2(fid, progname_g, H5P_DEFAULT)) < 0){
+ fprintf(stderr, "H5Dopen2 failed\n");
+ return -1;
+ }
+
+ /* Disable mdc flushes for the dataset */
+ if(H5Odisable_mdc_flushes(dsid) < 0) {
+ fprintf(stderr, "H5Odisable_mdc_flushes failed\n");
+ return -1;
+ }
+
+ /* Get mdc disabled status of the dataset */
+ if(H5Oare_mdc_flushes_disabled(dsid, &disabled) < 0) {
+ fprintf(stderr, "H5Oare_mdc_flushes_disabled failed\n");
+ return -1;
+ } else if(disabled)
+ printf("Dataset has disabled mdc flushes.\n");
+ else
+ printf("Dataset should have disabled its mdc flushes.\n");
+
+ /* Find chunksize used */
+ if ((dcpl = H5Dget_create_plist(dsid)) < 0){
+ fprintf(stderr, "H5Dget_create_plist failed\n");
+ return -1;
+ }
+ if (H5D_CHUNKED != H5Pget_layout(dcpl)){
+ fprintf(stderr, "storage layout is not chunked\n");
+ return -1;
+ }
+ if ((rank = H5Pget_chunk(dcpl, 3, chunk_dims)) != 3){
+ fprintf(stderr, "storage rank is not 3\n");
+ return -1;
+ }
+
+ /* verify chunk_dims against the set parameters */
+ if (chunk_dims[0]!= chunkdims_g[0] || chunk_dims[1] != cz || chunk_dims[2] != cz){
+ fprintf(stderr, "chunk size is not as expected. Got dims=(%llu,%llu,%llu)\n",
+ (unsigned long long)chunk_dims[0], (unsigned long long)chunk_dims[1],
+ (unsigned long long)chunk_dims[2]);
+ return -1;
+ }
+
+ /* allocate space for data buffer 1 X dims[1] X dims[2] of UC_CTYPE */
+ memdims[0]=1;
+ memdims[1] = dims_g[1];
+ memdims[2] = dims_g[2];
+ if ((buffer=(UC_CTYPE*)HDmalloc((size_t)memdims[1]*(size_t)memdims[2]*sizeof(UC_CTYPE)))==NULL) {
+ fprintf(stderr, "malloc: failed\n");
+ return -1;
+ };
+
+ /*
+ * Get dataset rank and dimension.
+ */
+ f_sid = H5Dget_space(dsid); /* Get filespace handle first. */
+ rank = H5Sget_simple_extent_ndims(f_sid);
+ if (rank != UC_RANK){
+ fprintf(stderr, "rank(%d) of dataset does not match\n", rank);
+ return -1;
+ }
+ if (H5Sget_simple_extent_dims(f_sid, dims, NULL) < 0){
+ fprintf(stderr, "H5Sget_simple_extent_dims got error\n");
+ return -1;
+ }
+ printf("dataset rank %d, dimensions %llu x %llu x %llu\n",
+ rank, (unsigned long long)(dims[0]), (unsigned long long)(dims[1]),
+ (unsigned long long)(dims[2]));
+ /* verify that file space dims are as expected and are consistent with memory space dims */
+ if (dims[0] != 0 || dims[1] != memdims[1] || dims[2] != memdims[2]){
+ fprintf(stderr, "dataset is not empty. Got dims=(%llu,%llu,%llu)\n",
+ (unsigned long long)dims[0], (unsigned long long)dims[1],
+ (unsigned long long)dims[2]);
+ return -1;
+ }
+
+ /* setup mem-space for buffer */
+ if ((m_sid=H5Screate_simple(rank, memdims, NULL))<0){
+ fprintf(stderr, "H5Screate_simple for memory failed\n");
+ return -1;
+ };
+
+ /* write planes */
+ count[0]=1;
+ count[1]=dims[1];
+ count[2]=dims[2];
+ for (i=0; i<nplanes_g; i++){
+ /* fill the buffer with the plane number i */
+ bufptr = buffer;
+ for (j=0; j<dims[1]; j++)
+ for (k=0; k<dims[2]; k++)
+ *bufptr++ = i;
+
+ /* extend the dataset by one for new plane */
+ dims[0]=i+1;
+ if(H5Dset_extent(dsid, dims) < 0){
+ fprintf(stderr, "H5Dset_extent failed\n");
+ return -1;
+ }
+
+ /* Get the dataset's dataspace */
+ if((f_sid = H5Dget_space(dsid)) < 0){
+ fprintf(stderr, "H5Dset_extent failed\n");
+ return -1;
+ }
+
+ start[0]=i;
+ /* Choose the next plane to write */
+ if(H5Sselect_hyperslab(f_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0){
+ fprintf(stderr, "Failed H5Sselect_hyperslab\n");
+ return -1;
+ }
+
+ /* Write plane to the dataset */
+ if(H5Dwrite(dsid, UC_DATATYPE, m_sid, f_sid, H5P_DEFAULT, buffer) < 0){
+ fprintf(stderr, "Failed H5Dwrite\n");
+ return -1;
+ }
+
+        /* Flush the dataset after every chunkplanes_g planes */
+ if(!((i + 1) % (hsize_t)chunkplanes_g)) {
+ if(H5Dflush(dsid) < 0) {
+ fprintf(stderr, "Failed to H5Dflush dataset\n");
+ return -1;
+ }
+ }
+ }
+
+ if(H5Dflush(dsid) < 0) {
+ fprintf(stderr, "Failed to H5Dflush dataset\n");
+ return -1;
+ }
+
+    /* Re-enable mdc flushes for the dataset */
+    /* (Closing the dataset later would re-enable mdc flushes automatically if this were skipped) */
+ if(disabled)
+ if(H5Oenable_mdc_flushes(dsid) < 0) {
+ fprintf(stderr, "Failed to H5Oenable_mdc_flushes\n");
+ return -1;
+ }
+
+ /* Done writing. Free/Close all resources including data file */
+ HDfree(buffer);
+
+ if(H5Dclose(dsid) < 0){
+        fprintf(stderr, "Failed to close dataset\n");
+ return -1;
+ }
+ if(H5Sclose(m_sid) < 0){
+ fprintf(stderr, "Failed to close memory space\n");
+ return -1;
+ }
+ if(H5Sclose(f_sid) < 0){
+ fprintf(stderr, "Failed to close file space\n");
+ return -1;
+ }
+ if(H5Pclose(fapl) < 0){
+        fprintf(stderr, "Failed to close property list\n");
+ return -1;
+ }
+ if(H5Fclose(fid) < 0){
+ fprintf(stderr, "Failed to close file id\n");
+ return -1;
+ }
+
+ return 0;
+} /* write_file() */
+
+
+
+/* Overall Algorithm:
+ * Parse options from the user;
+ * Generate/pre-create the test file needed and close it;
+ * Write to the file.
+ */
+int
+main(int argc, char *argv[])
+{
+ int ret_value = 0;
+
+ /* initialization */
+ if(setup_parameters(argc, argv) < 0)
+ Hgoto_error(1);
+
+ /* ============*/
+ /* Create file */
+ /* ============*/
+ printf("Creating skeleton data file for testing H5Odisable_mdc_flushes()...\n");
+ if(create_file() < 0) {
+        fprintf(stderr, "***encountered error\n");
+ Hgoto_error(1);
+ } /* end if */
+ else
+ printf("File created.\n");
+
+    printf("Writing to the file\n");
+ if(write_file() < 0) {
+ fprintf(stderr, "write_file encountered error\n");
+ Hgoto_error(1);
+ }
+
+done:
+ /* Print result and exit */
+ if(ret_value != 0)
+ printf("Error(s) encountered\n");
+ else
+ printf("All passed\n");
+
+ return(ret_value);
+}
+
+#else /* H5_HAVE_FORK */
+
+int
+main(void)
+{
+ HDfprintf(stderr, "Non-POSIX platform. Skipping.\n");
+ return EXIT_SUCCESS;
+} /* end main() */
+
+#endif /* H5_HAVE_FORK */
+
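
For reference, the metadata-cache "corking" calls exercised by use_disable_mdc_flushes.c
above reduce to the pattern sketched below. This is an illustrative sketch only, not part
of the patch; cork_example and dset_id are hypothetical names and error handling is minimal.

/*
 * Illustrative sketch (hypothetical helper, not part of the patch):
 * the corking pattern exercised by use_disable_mdc_flushes.c.
 */
#include "hdf5.h"

static int
cork_example(hid_t dset_id)
{
    hbool_t disabled = 0;

    /* Keep this dataset's metadata from being flushed ("cork" it) */
    if(H5Odisable_mdc_flushes(dset_id) < 0)
        return -1;

    /* Query the corked state; it should now report true */
    if(H5Oare_mdc_flushes_disabled(dset_id, &disabled) < 0)
        return -1;

    /* ... extend/write/flush the dataset while it is corked ... */

    /* Re-enable flushes; closing the dataset would also uncork it */
    if(disabled && H5Oenable_mdc_flushes(dset_id) < 0)
        return -1;

    return 0;
}
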
diff --git a/test/vds_swmr.h b/test/vds_swmr.h
new file mode 100644
index 0000000..c043fd6
--- /dev/null
+++ b/test/vds_swmr.h
@@ -0,0 +1,165 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef VDS_SWMR_H
+#define VDS_SWMR_H
+
+#include <hdf5.h>
+
+/* virtual dataset <---> source dataset mapping and sizes
+
+ ***************** --+
+ * A * K
+ ***************** --+
+ * * |
+ * B * N
+ * * |
+ ***************** --+
+ * C *
+ *****************
+ * *
+ * D *
+ * *
+ *****************
+ * E *
+ *****************
+ * *
+ * F *
+ * *
+ *****************
+
+ | |
+ +-------M-------+
+
+
+ dim[0]
+ /
+ /
+ /
+ -----> dim[2]
+ |
+ |
+ |
+ dim[1]
+
+
+ NOTE: This use case also checks for varying numbers of written planes.
+ Dataset A contains the full number of planes and each successive
+ dataset contains one fewer plane, down to the last dataset, which
+         contains zero planes. Each dataset's extent along the unlimited
+         dimension is set to the number of planes written, so the "empty"
+         regions will contain the VDS fill value.
+*/
+
+
+/* All datasets are 3D */
+#define RANK 3
+
+/* Lengths of string identifiers (file, dataset names, etc.) */
+#define NAME_LEN 32
+
+/* Compression level */
+#define COMPRESSION_LEVEL 7
+
+/* Number of source files */
+#define N_SOURCES 6
+
+/* Dataset dimensions */
+#define SM_HEIGHT 2 /* K */
+#define LG_HEIGHT 4 /* N */
+#define SM_LG_HEIGHT 6 /* SM_HEIGHT + LG_HEIGHT */
+#define FULL_HEIGHT 18 /* (3 * K) + (3 * N) */
+#define HALF_HEIGHT 9
+#define WIDTH 8 /* M */
+#define HALF_WIDTH 4
+
+/* Max number of planes in the dataset */
+#define N_MAX_PLANES H5S_UNLIMITED
+
+/* Number of planes each writer will write */
+#define N_PLANES_TO_WRITE 25
+
+/* Dataset datatypes */
+#define SOURCE_DATATYPE H5T_STD_I32LE
+#define VDS_DATATYPE H5T_STD_I32LE
+
+/* Starting size of datasets, both source and VDS */
+static hsize_t DIMS[N_SOURCES][RANK] = {
+ {0, SM_HEIGHT, WIDTH},
+ {0, LG_HEIGHT, WIDTH},
+ {0, SM_HEIGHT, WIDTH},
+ {0, LG_HEIGHT, WIDTH},
+ {0, SM_HEIGHT, WIDTH},
+ {0, LG_HEIGHT, WIDTH}
+};
+static hsize_t VDS_DIMS[RANK] = {0, FULL_HEIGHT, WIDTH};
+
+/* Maximum size of datasets, both source and VDS.
+ * NOTE: Theoretical (i.e.: H5S_UNLIMITED), not the actual max
+ * number of planes written out by the writers before they stop.
+ * That number is specified separately.
+ */
+static hsize_t MAX_DIMS[N_SOURCES][RANK] = {
+ {N_MAX_PLANES, SM_HEIGHT, WIDTH},
+ {N_MAX_PLANES, LG_HEIGHT, WIDTH},
+ {N_MAX_PLANES, SM_HEIGHT, WIDTH},
+ {N_MAX_PLANES, LG_HEIGHT, WIDTH},
+ {N_MAX_PLANES, SM_HEIGHT, WIDTH},
+ {N_MAX_PLANES, LG_HEIGHT, WIDTH}
+};
+static hsize_t VDS_MAX_DIMS[RANK] = {N_MAX_PLANES, FULL_HEIGHT, WIDTH};
+
+/* Planes */
+static hsize_t PLANES[N_SOURCES][RANK] = {
+ {1, SM_HEIGHT, WIDTH},
+ {1, LG_HEIGHT, WIDTH},
+ {1, SM_HEIGHT, WIDTH},
+ {1, LG_HEIGHT, WIDTH},
+ {1, SM_HEIGHT, WIDTH},
+ {1, LG_HEIGHT, WIDTH}
+};
+static hsize_t VDS_PLANE[RANK] = {1, FULL_HEIGHT, WIDTH};
+
+/* File names for source datasets */
+static char FILE_NAMES[N_SOURCES][NAME_LEN] = {
+ {"vds_swmr_src_a.h5"},
+ {"vds_swmr_src_b.h5"},
+ {"vds_swmr_src_c.h5"},
+ {"vds_swmr_src_d.h5"},
+ {"vds_swmr_src_e.h5"},
+ {"vds_swmr_src_f.h5"}
+};
+
+/* VDS file name */
+static char VDS_FILE_NAME[NAME_LEN] = "vds_swmr.h5";
+
+/* Dataset names */
+static char SOURCE_DSET_NAME[NAME_LEN] = "source_dset";
+static char SOURCE_DSET_PATH[NAME_LEN] = "/source_dset";
+static char VDS_DSET_NAME[NAME_LEN] = "vds_dset";
+
+/* Fill values */
+static int32_t FILL_VALUES[N_SOURCES] = {
+ -1,
+ -2,
+ -3,
+ -4,
+ -5,
+ -6
+};
+static int32_t VDS_FILL_VALUE = -9;
+
+#endif /* VDS_SWMR_H */
+
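
As a quick check of the geometry described in vds_swmr.h above: the six sources stack
along dim[1] at running offsets of SM_HEIGHT/LG_HEIGHT. The standalone sketch below (not
one of the test programs) prints those offsets and the total height, which should come
out to 0, 2, 6, 8, 12, 14 and 18 == FULL_HEIGHT.

/*
 * Sketch: print the dim[1] offset of each source dataset inside the VDS.
 * With the constants above this prints 0, 2, 6, 8, 12, 14 and a total of
 * 18 == FULL_HEIGHT. Standalone example, not part of the patch.
 */
#include <stdio.h>
#include "vds_swmr.h"

int
main(void)
{
    hsize_t offset = 0;
    int     i;

    for(i = 0; i < N_SOURCES; i++) {
        printf("source %d maps to VDS rows starting at %llu\n",
               i, (unsigned long long)offset);
        offset += PLANES[i][1];     /* SM_HEIGHT or LG_HEIGHT */
    }
    printf("total height = %llu (FULL_HEIGHT = %d)\n",
           (unsigned long long)offset, FULL_HEIGHT);
    return 0;
}
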
diff --git a/test/vds_swmr_gen.c b/test/vds_swmr_gen.c
new file mode 100644
index 0000000..60f081e
--- /dev/null
+++ b/test/vds_swmr_gen.c
@@ -0,0 +1,178 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "h5test.h"
+#include "vds_swmr.h"
+
+int
+main(void)
+{
+ hid_t faplid = -1; /* file access property list ID (all files) */
+
+ hid_t src_sid = -1; /* source dataset's dataspace ID */
+ hid_t src_dcplid = -1; /* source dataset property list ID */
+
+ hid_t vds_sid = -1; /* VDS dataspace ID */
+ hid_t vds_dcplid = -1; /* VDS dataset property list ID */
+
+ hid_t fid = -1; /* HDF5 file ID */
+ hid_t did = -1; /* dataset ID */
+
+ hsize_t start[RANK]; /* starting point for hyperslab */
+ int map_start = -1; /* starting point in the VDS map */
+
+ int i; /* iterator */
+
+
+ /* Start by creating the virtual dataset (VDS) dataspace and creation
+ * property list. The individual source datasets are then created
+ * and the VDS map (stored in the VDS property list) is updated.
+ */
+
+ /* Create VDS dcpl */
+ if((vds_dcplid = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR
+ if(H5Pset_fill_value(vds_dcplid, VDS_DATATYPE,
+ &VDS_FILL_VALUE) < 0)
+ TEST_ERROR
+
+ /* Create VDS dataspace */
+ if((vds_sid = H5Screate_simple(RANK, VDS_DIMS,
+ VDS_MAX_DIMS)) < 0)
+ TEST_ERROR
+
+ /************************************
+ * Create source files and datasets *
+ ************************************/
+
+ start[0] = 0;
+ start[1] = 0;
+ start[2] = 0;
+ map_start = 0;
+
+ /* All SWMR files need to use the latest file format */
+ if((faplid = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ TEST_ERROR
+ if(H5Pset_libver_bounds(faplid, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ TEST_ERROR
+
+ for(i = 0; i < N_SOURCES; i++) {
+
+ /* source dataset dcpl */
+ if((src_dcplid = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR
+ if(H5Pset_chunk(src_dcplid, RANK, PLANES[i]) < 0)
+ TEST_ERROR
+ if(H5Pset_fill_value(src_dcplid, SOURCE_DATATYPE,
+ &FILL_VALUES[i]) < 0)
+ TEST_ERROR
+
+ /* Use a mix of compressed and uncompressed datasets */
+ if(0 != i % 2)
+ if(H5Pset_deflate(src_dcplid, COMPRESSION_LEVEL) < 0)
+ TEST_ERROR
+
+ /* Create source file, dataspace, and dataset */
+ if((fid = H5Fcreate(FILE_NAMES[i], H5F_ACC_TRUNC,
+ H5P_DEFAULT, faplid)) < 0)
+ TEST_ERROR
+ if((src_sid = H5Screate_simple(RANK, DIMS[i],
+ MAX_DIMS[i])) < 0)
+ TEST_ERROR
+ if((did = H5Dcreate2(fid, SOURCE_DSET_NAME,
+ SOURCE_DATATYPE, src_sid,
+ H5P_DEFAULT, src_dcplid, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* set up hyperslabs for source and destination datasets */
+ start[1] = 0;
+ if(H5Sselect_hyperslab(src_sid, H5S_SELECT_SET, start, NULL,
+ MAX_DIMS[i], NULL) < 0)
+ TEST_ERROR
+ start[1] = map_start;
+ if(H5Sselect_hyperslab(vds_sid, H5S_SELECT_SET, start, NULL,
+ MAX_DIMS[i], NULL) < 0)
+ TEST_ERROR
+ map_start += PLANES[i][1];
+
+ /* Add VDS mapping */
+ if(H5Pset_virtual(vds_dcplid, vds_sid, FILE_NAMES[i],
+ SOURCE_DSET_PATH, src_sid) < 0)
+ TEST_ERROR
+
+ /* close */
+ if(H5Sclose(src_sid) < 0)
+ TEST_ERROR
+ if(H5Pclose(src_dcplid) < 0)
+ TEST_ERROR
+ if(H5Dclose(did) < 0)
+ TEST_ERROR
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR
+
+ } /* end for */
+
+
+ /*******************
+ * Create VDS file *
+ *******************/
+
+ /* file */
+ if((fid = H5Fcreate(VDS_FILE_NAME, H5F_ACC_TRUNC,
+ H5P_DEFAULT, faplid)) < 0)
+ TEST_ERROR
+
+ /* dataset */
+ if((did = H5Dcreate2(fid, VDS_DSET_NAME, VDS_DATATYPE, vds_sid,
+ H5P_DEFAULT, vds_dcplid, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* close */
+ if(H5Pclose(faplid) < 0)
+ TEST_ERROR
+ if(H5Pclose(vds_dcplid) < 0)
+ TEST_ERROR
+ if(H5Sclose(vds_sid) < 0)
+ TEST_ERROR
+ if(H5Dclose(did) < 0)
+ TEST_ERROR
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR
+
+ return EXIT_SUCCESS;
+
+error:
+
+ H5E_BEGIN_TRY {
+ if(faplid >= 0)
+ (void)H5Pclose(faplid);
+ if(src_sid >= 0)
+ (void)H5Sclose(src_sid);
+ if(src_dcplid >= 0)
+ (void)H5Pclose(src_dcplid);
+ if(vds_sid >= 0)
+ (void)H5Sclose(vds_sid);
+ if(vds_dcplid >= 0)
+ (void)H5Pclose(vds_dcplid);
+ if(fid >= 0)
+ (void)H5Fclose(fid);
+ if(did >= 0)
+ (void)H5Dclose(did);
+ } H5E_END_TRY
+
+ return EXIT_FAILURE;
+
+} /* end main */
+
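
Once vds_swmr_gen has created the files above, the stored virtual-to-source mapping can be
inspected through the VDS dataset's creation property list. The sketch below is illustrative
only; check_vds_mapping is a hypothetical helper and error cleanup is trimmed.

/*
 * Sketch (hypothetical helper, not part of the patch): inspect the mapping
 * that vds_swmr_gen stored in the VDS dataset's creation property list.
 */
#include <stdio.h>
#include "vds_swmr.h"

static int
check_vds_mapping(void)
{
    hid_t  fid = -1, did = -1, dcpl = -1;
    size_t count = 0, u;
    char   src_file[NAME_LEN];

    if((fid = H5Fopen(VDS_FILE_NAME, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
        return -1;
    if((did = H5Dopen2(fid, VDS_DSET_NAME, H5P_DEFAULT)) < 0)
        return -1;
    if((dcpl = H5Dget_create_plist(did)) < 0)
        return -1;

    /* The generator added one mapping per source file */
    if(H5Pget_virtual_count(dcpl, &count) < 0)
        return -1;
    printf("VDS has %u mappings (expected %d)\n", (unsigned)count, N_SOURCES);

    for(u = 0; u < count; u++) {
        if(H5Pget_virtual_filename(dcpl, u, src_file, sizeof(src_file)) < 0)
            return -1;
        printf("mapping %u -> %s\n", (unsigned)u, src_file);
    }

    (void)H5Pclose(dcpl);
    (void)H5Dclose(did);
    (void)H5Fclose(fid);
    return 0;
}
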
diff --git a/test/vds_swmr_reader.c b/test/vds_swmr_reader.c
new file mode 100644
index 0000000..7ef2e90
--- /dev/null
+++ b/test/vds_swmr_reader.c
@@ -0,0 +1,142 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "h5test.h"
+#include "vds_swmr.h"
+
+int
+main(void)
+{
+ hid_t fid = -1; /* HDF5 file ID */
+ hid_t did = -1; /* dataset ID */
+ hid_t msid = -1; /* memory dataspace ID */
+ hid_t fsid = -1; /* file dataspace ID */
+
+ hsize_t start[RANK]; /* hyperslab start point */
+
+ int n_elements = 0; /* size of buffer (elements) */
+ size_t size = 0; /* size of buffer (bytes) */
+ int *buffer = NULL; /* data buffer */
+
+ int n_dims = -1; /* # dimensions in dataset */
+ hsize_t dims[RANK]; /* current size of dataset */
+ hsize_t max_dims[RANK]; /* max size of dataset */
+
+    hbool_t has_errors = FALSE; /* if the read data contains errors */
+
+
+ /* Open the VDS file and dataset */
+ if((fid = H5Fopen(VDS_FILE_NAME, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+ if((did = H5Dopen2(fid, VDS_DSET_NAME, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Create the read buffer */
+ n_elements = VDS_PLANE[1] * VDS_PLANE[2];
+ size = n_elements * sizeof(int);
+ if(NULL == (buffer = (int *)HDmalloc(size)))
+ TEST_ERROR
+
+ /* Create memory dataspace */
+ if((msid = H5Screate_simple(RANK, VDS_PLANE, NULL)) < 0)
+ TEST_ERROR
+
+ /* Read data until the dataset is full (via the writer) */
+ do {
+
+ /* Refresh metadata */
+ if(H5Drefresh(did) < 0)
+ TEST_ERROR
+
+ /* Get the dataset dimensions */
+ if((fsid = H5Dget_space(did)) < 0)
+ TEST_ERROR
+ if(H5Sget_simple_extent_dims(fsid, dims, max_dims) < 0)
+ TEST_ERROR
+
+        /* Check the reported size of the VDS */
+        if((n_dims = H5Sget_simple_extent_ndims(fsid)) < 0)
+            TEST_ERROR
+        if(n_dims != RANK)
+            TEST_ERROR
+ /* NOTE: Don't care what dims[0] is. */
+ if(dims[1] != FULL_HEIGHT)
+ TEST_ERROR
+ if(dims[2] != WIDTH)
+ TEST_ERROR
+ if(max_dims[0] != H5S_UNLIMITED)
+ TEST_ERROR
+ if(max_dims[1] != FULL_HEIGHT)
+ TEST_ERROR
+ if(max_dims[2] != WIDTH)
+ TEST_ERROR
+
+ /* Continue if there's nothing to read */
+ if(0 == dims[0]) {
+ if(H5Sclose(fsid) < 0)
+ TEST_ERROR
+ continue;
+ }
+
+ /* Read a plane from the VDS */
+ /* At this time, we just make sure we can read planes without errors. */
+ start[0] = dims[0] - 1;
+ start[1] = 0;
+ start[2] = 0;
+ if(H5Sselect_hyperslab(fsid, H5S_SELECT_SET, start, NULL, VDS_PLANE, NULL) < 0)
+ TEST_ERROR
+ if(H5Dread(did, H5T_NATIVE_INT, msid, fsid, H5P_DEFAULT, buffer) < 0)
+ TEST_ERROR
+
+ if(H5Sclose(fsid) < 0)
+ TEST_ERROR
+
+ } while (dims[0] < N_PLANES_TO_WRITE);
+
+ /* Close file and dataset */
+ if(H5Sclose(msid) < 0)
+ TEST_ERROR
+ if(H5Dclose(did) < 0)
+ TEST_ERROR
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR
+
+ HDfree(buffer);
+
+ HDfprintf(stderr, "SWMR reader exited successfully\n");
+ return EXIT_SUCCESS;
+
+error:
+
+ H5E_BEGIN_TRY {
+ if(fid >= 0)
+ (void)H5Fclose(fid);
+ if(did >= 0)
+ (void)H5Dclose(did);
+ if(msid >= 0)
+ (void)H5Sclose(msid);
+ if(fsid >= 0)
+ (void)H5Sclose(fsid);
+ if(buffer != NULL)
+ HDfree(buffer);
+ } H5E_END_TRY
+
+ HDfprintf(stderr, "ERROR: SWMR reader exited with errors\n");
+ return EXIT_FAILURE;
+
+} /* end main */
+
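
The reader above only verifies that planes can be read without errors. A value check could
be layered on top using the writers' fill pattern ((plane+1)*10 + plane) and the fill values
from vds_swmr.h; verify_plane below is a hypothetical sketch of such a check, not part of
the test.

/*
 * Sketch (hypothetical helper, not part of the test): for plane p the
 * writers store (p+1)*10 + p, so every element of a VDS plane should hold
 * either that value or one of the fill values (-1..-6 for the sources,
 * -9 for the VDS) where the corresponding source has not written plane p.
 */
#include "vds_swmr.h"

static int
verify_plane(const int *plane_buf, hsize_t plane, hsize_t n_elements)
{
    int     expected = (((int)plane + 1) * 10) + (int)plane;
    hsize_t u;

    for(u = 0; u < n_elements; u++) {
        int v = plane_buf[u];

        if(v != expected && v != VDS_FILL_VALUE && (v < -N_SOURCES || v > -1))
            return -1;      /* unexpected value found */
    }
    return 0;
}
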
diff --git a/test/vds_swmr_writer.c b/test/vds_swmr_writer.c
new file mode 100644
index 0000000..be7548c
--- /dev/null
+++ b/test/vds_swmr_writer.c
@@ -0,0 +1,167 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+
+#include "h5test.h"
+#include "vds_swmr.h"
+
+int
+main(int argc, char *argv[])
+{
+ int file_number = -1; /* Source file number */
+
+ hid_t fid = -1; /* HDF5 file ID */
+ hid_t faplid = -1; /* file access property list ID */
+ hid_t did = -1; /* dataset ID */
+ hid_t msid = -1; /* memory dataspace ID */
+ hid_t fsid = -1; /* file dataspace ID */
+
+ hsize_t extent[RANK]; /* dataset extents */
+ hsize_t start[RANK]; /* hyperslab start point */
+
+ int *buffer = NULL; /* data buffer */
+ int value = -1; /* value written to datasets */
+
+ hsize_t n_elements = 0; /* number of elements in a plane */
+
+ hsize_t i; /* iterator */
+ hsize_t j; /* iterator */
+
+
+ /******************************
+ * Fill a source dataset file *
+ ******************************/
+
+ /* The file number is passed on the command line.
+ * This is an integer index into the FILE_NAMES array.
+ */
+ if(argc != 2) {
+ HDfprintf(stderr, "ERROR: Must pass the source file number on the command line.\n");
+ return EXIT_FAILURE;
+ }
+
+ file_number = HDatoi(argv[1]);
+ if(file_number < 0 || file_number >= N_SOURCES)
+ TEST_ERROR
+
+ /* Open the source file and dataset */
+ /* All SWMR files need to use the latest file format */
+ if((faplid = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ TEST_ERROR
+ if(H5Pset_libver_bounds(faplid, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ TEST_ERROR
+ if((fid = H5Fopen(FILE_NAMES[file_number], H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, faplid)) < 0)
+ TEST_ERROR
+ if((did = H5Dopen2(fid, SOURCE_DSET_PATH, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+
+ /* Create a data buffer that represents a plane */
+ n_elements = PLANES[file_number][1] * PLANES[file_number][2];
+ if(NULL == (buffer = (int *)HDmalloc(n_elements * sizeof(int))))
+ TEST_ERROR
+
+ /* Create the memory dataspace */
+ if((msid = H5Screate_simple(RANK, PLANES[file_number], NULL)) < 0)
+ TEST_ERROR
+
+ /* Write planes to the dataset */
+ for(i = 0; i < N_PLANES_TO_WRITE; i++) {
+
+        unsigned delay; /* Time to busy-wait until before the next write */
+
+ /* Cork the dataset's metadata in the cache */
+ if(H5Odisable_mdc_flushes(did) < 0)
+ TEST_ERROR
+
+        /* Extend the dataset by one plane. Doing this one plane at a time is inefficient, but that's fine here. */
+ extent[0] = i + 1;
+ extent[1] = PLANES[file_number][1];
+ extent[2] = PLANES[file_number][2];
+ if(H5Dset_extent(did, extent) < 0)
+ TEST_ERROR
+
+ /* Get the file dataspace */
+ if((fsid = H5Dget_space(did)) < 0)
+ TEST_ERROR
+
+        /* Fill the plane with a value derived from the plane number: (i+1)*10 + i */
+        value = (((int)i + 1) * 10) + (int)i;
+ for(j = 0; j < n_elements; j++)
+ buffer[j] = value;
+
+ /* Set up the hyperslab for writing. */
+ start[0] = i;
+ start[1] = 0;
+ start[2] = 0;
+ if(H5Sselect_hyperslab(fsid, H5S_SELECT_SET, start, NULL, PLANES[file_number], NULL) < 0)
+ TEST_ERROR
+
+ /* Write the plane to the dataset. */
+ if(H5Dwrite(did, H5T_NATIVE_INT, msid, fsid, H5P_DEFAULT, buffer) < 0)
+ TEST_ERROR
+
+ /* Uncork the dataset's metadata from the cache */
+ if(H5Oenable_mdc_flushes(did) < 0)
+ TEST_ERROR
+
+ /* Wait one second between writing planes */
+ delay = HDtime(0) + 1;
+ while(HDtime(0) < delay)
+ ;
+
+ /* Flush */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ TEST_ERROR
+
+ } /* end for */
+
+ if(H5Pclose(faplid) < 0)
+ TEST_ERROR
+ if(H5Sclose(msid) < 0)
+ TEST_ERROR
+ if(H5Sclose(fsid) < 0)
+ TEST_ERROR
+ if(H5Dclose(did) < 0)
+ TEST_ERROR
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR
+ HDfree(buffer);
+
+ HDfprintf(stderr, "SWMR writer exited successfully\n");
+ return EXIT_SUCCESS;
+
+error:
+
+ H5E_BEGIN_TRY {
+ if(fid >= 0)
+ (void)H5Fclose(fid);
+ if(faplid >= 0)
+ (void)H5Pclose(faplid);
+ if(did >= 0)
+ (void)H5Dclose(did);
+ if(msid >= 0)
+ (void)H5Sclose(msid);
+ if(fsid >= 0)
+ (void)H5Sclose(fsid);
+ if(buffer != NULL)
+ HDfree(buffer);
+ } H5E_END_TRY
+
+ HDfprintf(stderr, "ERROR: SWMR writer exited with errors\n");
+ return EXIT_FAILURE;
+
+} /* end main */
+
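
The body of the writer's per-plane loop above boils down to the sequence sketched below:
cork, extend, select, write, uncork, flush. write_one_plane is a hypothetical helper with
trimmed error handling, shown only to summarize the calls involved; it is not part of the
patch.

/*
 * Sketch (hypothetical helper, not part of the patch): append one plane of
 * extent {1, height, width} at index plane_idx to a 3-D chunked dataset
 * opened with H5F_ACC_SWMR_WRITE, then flush so readers can see it.
 */
#include "hdf5.h"

static int
write_one_plane(hid_t fid, hid_t did, hsize_t plane_idx,
                hsize_t height, hsize_t width, const int *plane)
{
    hsize_t extent[3] = {plane_idx + 1, height, width};
    hsize_t start[3]  = {plane_idx, 0, 0};
    hsize_t count[3]  = {1, height, width};
    hid_t   msid = -1, fsid = -1;
    int     ret = -1;

    /* Cork the dataset's metadata while it is being modified */
    if(H5Odisable_mdc_flushes(did) < 0)
        return -1;

    /* Grow the unlimited dimension by one plane */
    if(H5Dset_extent(did, extent) < 0)
        goto out;

    /* Select the new plane in the refreshed file dataspace and write it */
    if((fsid = H5Dget_space(did)) < 0)
        goto out;
    if(H5Sselect_hyperslab(fsid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
        goto out;
    if((msid = H5Screate_simple(3, count, NULL)) < 0)
        goto out;
    if(H5Dwrite(did, H5T_NATIVE_INT, msid, fsid, H5P_DEFAULT, plane) < 0)
        goto out;

    /* Uncork, then flush so SWMR readers can see the new plane */
    if(H5Oenable_mdc_flushes(did) < 0)
        goto out;
    if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
        goto out;

    ret = 0;

out:
    if(msid >= 0) (void)H5Sclose(msid);
    if(fsid >= 0) (void)H5Sclose(fsid);
    return ret;
}
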