Diffstat (limited to 'test')
-rw-r--r--   test/CMakeLists.txt              |  243
-rw-r--r--   test/Makefile.am                 |   14
-rw-r--r--   test/Makefile.in                 |   97
-rw-r--r--   test/accum.c                     | 1809
-rw-r--r--   test/be_data.h5                  |  bin 2288 -> 40320 bytes
-rw-r--r--   test/big.c                       |  408
-rw-r--r--   test/bittests.c                  |   39
-rwxr-xr-x   test/cross_read.c                |  862
-rw-r--r--   test/dangle.c                    |  129
-rw-r--r--   test/dsets.c                     |  383
-rw-r--r--   test/dt_arith.c                  |   34
-rw-r--r--   test/err_compat.c                |  394
-rw-r--r--   test/external.c                  |    2
-rw-r--r--   test/fheap.c                     |  376
-rw-r--r--   test/filter_fail.c               |   89
-rwxr-xr-x   test/gen_cross.c                 |  826
-rw-r--r--   test/h5test.c                    |   10
-rw-r--r--   test/h5test.h                    |    2
-rw-r--r--   test/le_data.h5                  |  bin 2288 -> 40320 bytes
-rw-r--r--   test/links.c                     |  140
-rw-r--r--   test/links_env.c                 |  192
-rw-r--r--   test/mf.c                        |  132
-rw-r--r--   test/ohdr.c                      |  181
-rw-r--r--   test/testerror.sh.in             |    2
-rw-r--r--   test/testfiles/err_compat_1      |   40
-rw-r--r--   test/testfiles/links_env.out     |    6
-rw-r--r--   test/testframe.c                 |    2
-rw-r--r--   test/testlinks_env.sh.in         |   46
-rw-r--r--   test/th5o.c                      |  333
-rw-r--r--   test/vms_data.h5                 |  bin 2288 -> 40320 bytes
30 files changed, 6038 insertions, 753 deletions
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index f59be26..98fffe0 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -22,11 +22,9 @@ CONFIGURE_FILE (${HDF5_TEST_SOURCE_DIR}/H5srcdir_str.h.in H5srcdir_str.h @ONLY)
INCLUDE_DIRECTORIES (${CMAKE_CURRENT_BINARY_DIR})
ADD_LIBRARY (${HDF5_TEST_LIB_TARGET} ${LIB_TYPE} ${TEST_LIB_SRCS} ${TEST_LIB_HEADERS})
-IF (BUILD_SHARED_LIBS)
- IF (MSVC)
- TARGET_LINK_LIBRARIES (${HDF5_TEST_LIB_TARGET} "ws2_32.lib")
- ENDIF (MSVC)
-ENDIF (BUILD_SHARED_LIBS)
+IF (MSVC)
+ TARGET_LINK_LIBRARIES (${HDF5_TEST_LIB_TARGET} "ws2_32.lib")
+ENDIF (MSVC)
TARGET_LINK_LIBRARIES (${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET})
SET_GLOBAL_VARIABLE (HDF5_LIBRARIES_TO_EXPORT "${HDF5_LIBRARIES_TO_EXPORT};${HDF5_TEST_LIB_TARGET}")
H5_SET_LIB_OPTIONS (${HDF5_TEST_LIB_TARGET} ${HDF5_TEST_LIB_NAME} ${LIB_TYPE})
@@ -57,16 +55,17 @@ SET (HDF5_REFERENCE_FILES
err_compat_2
error_test_1
error_test_2
+ links_env.out
)
FOREACH (ref_file ${HDF5_REFERENCE_FILES})
- SET (dest "${PROJECT_BINARY_DIR}/testfiles/${ref_file}")
+ SET (dest "${PROJECT_BINARY_DIR}/${ref_file}")
#MESSAGE (STATUS " Copying ${h5_file}")
ADD_CUSTOM_COMMAND (
TARGET ${HDF5_TEST_LIB_TARGET}
POST_BUILD
- COMMAND ${CMAKE_COMMAND}
- ARGS -E copy_if_different ${HDF5_TEST_SOURCE_DIR}/testfiles/${ref_file} ${dest}
+ COMMAND ${XLATE_UTILITY}
+ ARGS ${HDF5_TEST_SOURCE_DIR}/testfiles/${ref_file} ${dest} -l3
)
ENDFOREACH (ref_file ${HDF5_REFERENCE_FILES})
@@ -147,8 +146,7 @@ SET (testhdf5_SRCS
#-- Adding test for testhdf5
ADD_EXECUTABLE (testhdf5 ${testhdf5_SRCS})
-H5_NAMING (testhdf5)
-TARGET_WIN_PROPERTIES (testhdf5)
+H5_NAMING (testhdf5 ${LIB_TYPE})
TARGET_LINK_LIBRARIES (testhdf5 ${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET})
ADD_TEST (NAME testhdf5 COMMAND $<TARGET_FILE:testhdf5>)
@@ -161,14 +159,86 @@ ADD_TEST (NAME testhdf5 COMMAND $<TARGET_FILE:testhdf5>)
MACRO (ADD_H5_TEST file)
ADD_EXECUTABLE (${file} ${HDF5_TEST_SOURCE_DIR}/${file}.c)
- H5_NAMING (${file})
- TARGET_WIN_PROPERTIES (${file})
+ H5_NAMING (${file} ${LIB_TYPE})
TARGET_LINK_LIBRARIES (${file} ${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET})
ADD_TEST (NAME ${file} COMMAND $<TARGET_FILE:${file}>)
ENDMACRO (ADD_H5_TEST file)
+# Remove any output file left over from previous test run
+ADD_TEST (
+ NAME h5test-clear-objects
+ COMMAND ${CMAKE_COMMAND}
+ -E remove
+ cache_test.h5
+ coord.h5
+ dt_arith1.h5
+ dt_arith2.h5
+ dtransform.h5
+ dtypes4.h5
+ dtypes5.h5
+ extlinks16A00000.h5
+ extlinks16A00001.h5
+ extlinks16A00002.h5
+ extlinks16B-b.h5
+ extlinks16B-g.h5
+ extlinks16B-l.h5
+ extlinks16B-r.h5
+ extlinks16B-s.h5
+ extlinks19B00000.h5
+ extlinks19B00001.h5
+ extlinks19B00002.h5
+ extlinks19B00003.h5
+ extlinks19B00004.h5
+ extlinks19B00005.h5
+ extlinks19B00006.h5
+ extlinks19B00007.h5
+ extlinks19B00008.h5
+ extlinks19B00009.h5
+ extlinks19B00010.h5
+ extlinks19B00011.h5
+ extlinks19B00012.h5
+ extlinks19B00013.h5
+ extlinks19B00014.h5
+ extlinks19B00015.h5
+ extlinks19B00016.h5
+ extlinks19B00017.h5
+ extlinks19B00018.h5
+ extlinks19B00019.h5
+ extlinks19B00020.h5
+ extlinks19B00021.h5
+ extlinks19B00022.h5
+ extlinks19B00023.h5
+ extlinks19B00024.h5
+ extlinks19B00025.h5
+ extlinks19B00026.h5
+ extlinks19B00027.h5
+ extlinks19B00028.h5
+ fheap.h5
+ objcopy_ext.h5
+ sys_file1
+ tattr.h5
+ testmeta.h5
+ tfile1.h5
+ tfile2.h5
+ tfile3.h5
+ tfile4.h5
+ tfile5.h5
+ th5o_file
+ th5s1.h5
+ th5s2.h5
+ th5s3.h5
+ tnullspace.h5
+ tselect.h5
+ tsohm.h5
+ tsohm_dst.h5
+ tsohm_src.h5
+ tstint1.h5
+ tstint2.h5
+)
+
SET (H5_TESTS
+ accum
lheap
ohdr
stab
@@ -212,21 +282,17 @@ SET (H5_TESTS
earray
btree2
fheap
- error_test
- err_compat
+ #error_test
+ #err_compat
tcheck_version
testmeta
+ #links_env
)
FOREACH (test ${H5_TESTS})
ADD_H5_TEST(${test})
ENDFOREACH (test ${H5_TESTS})
-#-- Allow extra time for fheap to complete 30min
-IF (WIN32)
- SET_TESTS_PROPERTIES (fheap PROPERTIES TIMEOUT 2500)
-ENDIF (WIN32)
-
##############################################################################
##############################################################################
@@ -236,24 +302,21 @@ ENDIF (WIN32)
#-- Adding test for cache
ADD_EXECUTABLE (cache ${HDF5_TEST_SOURCE_DIR}/cache.c ${HDF5_TEST_SOURCE_DIR}/cache_common.c)
-H5_NAMING (cache)
-TARGET_WIN_PROPERTIES (cache)
+H5_NAMING (cache ${LIB_TYPE})
TARGET_LINK_LIBRARIES (cache ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
ADD_TEST (NAME cache COMMAND $<TARGET_FILE:cache>)
#-- Adding test for cache_api
ADD_EXECUTABLE (cache_api ${HDF5_TEST_SOURCE_DIR}/cache_api.c ${HDF5_TEST_SOURCE_DIR}/cache_common.c)
-H5_NAMING (cache_api)
-TARGET_WIN_PROPERTIES (cache_api)
+H5_NAMING (cache_api ${LIB_TYPE})
TARGET_LINK_LIBRARIES (cache_api ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
ADD_TEST (NAME cache_api COMMAND $<TARGET_FILE:cache_api>)
#-- Adding test for cache_tagging
ADD_EXECUTABLE (cache_tagging ${HDF5_TEST_SOURCE_DIR}/cache_tagging.c ${HDF5_TEST_SOURCE_DIR}/cache_common.c)
-H5_NAMING (cache_tagging)
-TARGET_WIN_PROPERTIES (cache_tagging)
+H5_NAMING (cache_tagging ${LIB_TYPE})
TARGET_LINK_LIBRARIES (cache_tagging ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
ADD_TEST (NAME cache_tagging COMMAND $<TARGET_FILE:cache_tagging>)
@@ -265,12 +328,62 @@ ADD_EXECUTABLE (ttsafe
${HDF5_TEST_SOURCE_DIR}/ttsafe_cancel.c
${HDF5_TEST_SOURCE_DIR}/ttsafe_acreate.c
)
-H5_NAMING (ttsafe)
-TARGET_WIN_PROPERTIES (ttsafe)
+H5_NAMING (ttsafe ${LIB_TYPE})
TARGET_LINK_LIBRARIES (ttsafe ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
ADD_TEST (NAME ttsafe COMMAND $<TARGET_FILE:ttsafe>)
+#-- Adding test for err_compat
+IF (HDF5_ENABLE_DEPRECATED_SYMBOLS)
+ ADD_EXECUTABLE (err_compat ${HDF5_TEST_SOURCE_DIR}/err_compat.c)
+ H5_NAMING (err_compat ${LIB_TYPE})
+ TARGET_LINK_LIBRARIES (err_compat ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
+
+ ADD_TEST (NAME err_compat COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:err_compat>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_MASK_ERROR=true"
+ -D "TEST_OUTPUT=err_compat.txt"
+ -D "TEST_REFERENCE=err_compat_1"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF5_RESOURCES_DIR}/runTest.cmake"
+ )
+ENDIF (HDF5_ENABLE_DEPRECATED_SYMBOLS)
+
+#-- Adding test for error_test
+ADD_EXECUTABLE (error_test ${HDF5_TEST_SOURCE_DIR}/error_test.c)
+H5_NAMING (error_test ${LIB_TYPE})
+TARGET_LINK_LIBRARIES (error_test ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
+
+ADD_TEST (NAME error_test COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:error_test>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_MASK_ERROR=true"
+ -D "TEST_OUTPUT=error_test.txt"
+ -D "TEST_REFERENCE=error_test_1"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF5_RESOURCES_DIR}/runTest.cmake"
+)
+
+#-- Adding test for links_env
+ADD_EXECUTABLE (links_env ${HDF5_TEST_SOURCE_DIR}/links_env.c)
+H5_NAMING (links_env ${LIB_TYPE})
+TARGET_LINK_LIBRARIES (links_env ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
+
+ADD_TEST (NAME links_env COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:links_env>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_ENV_VAR:STRING=HDF5_EXT_PREFIX"
+ -D "TEST_ENV_VALUE:STRING=.:tmp"
+ -D "TEST_EXPECT=0"
+ -D "TEST_OUTPUT=links_env.txt"
+ -D "TEST_REFERENCE=links_env.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF5_RESOURCES_DIR}/runTest.cmake"
+)
+
IF (HDF5_TEST_VFD)
SET (VFD_LIST
@@ -281,13 +394,66 @@ IF (HDF5_TEST_VFD)
multi
family
)
+
+ SET (H5_VFD_TESTS
+ testhdf5
+ accum
+ lheap
+ ohdr
+ stab
+ gheap
+ cache
+ cache_api
+ cache_tagging
+ pool
+ hyperslab
+ istore
+ bittests
+ dt_arith
+ dtypes
+ cmpd_dset
+ filter_fail
+ extend
+ external
+ objcopy
+ links
+ unlink
+ big
+ mtime
+ fillval
+ mount
+ flush1
+ flush2
+ app_ref
+ enum
+ set_extent
+ ttsafe
+ getname
+ vfd
+ ntypes
+ dangle
+ dtransform
+ reserved
+ cross_read
+ freespace
+ mf
+ farray
+ earray
+ btree2
+ #fheap
+ error_test
+ err_compat
+ tcheck_version
+ testmeta
+ links_env
+)
IF (DIRECT_VFD)
SET (VFD_LIST ${VFD_LIST} direct)
ENDIF (DIRECT_VFD)
MACRO (ADD_VFD_TEST vfdname resultcode)
- FOREACH (test ${H5_TESTS})
+ FOREACH (test ${H5_VFD_TESTS})
ADD_TEST (
NAME VFD-${vfdname}-${test}
COMMAND "${CMAKE_COMMAND}"
@@ -295,10 +461,24 @@ IF (HDF5_TEST_VFD)
-D "TEST_ARGS:STRING="
-D "TEST_VFD:STRING=${vfdname}"
-D "TEST_EXPECT=${resultcode}"
+ -D "TEST_OUTPUT=${test}"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF5_RESOURCES_DIR}/vfdTest.cmake"
+ )
+ ENDFOREACH (test ${H5_VFD_TESTS})
+ IF (HDF5_TEST_FHEAP_VFD)
+ ADD_TEST (
+ NAME VFD-${vfdname}-fheap
+ COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:fheap>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_VFD:STRING=${vfdname}"
+ -D "TEST_EXPECT=${resultcode}"
+ -D "TEST_OUTPUT=fheap"
-D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
-P "${HDF5_RESOURCES_DIR}/vfdTest.cmake"
)
- ENDFOREACH (test ${H5_TESTS})
+ ENDIF (HDF5_TEST_FHEAP_VFD)
ENDMACRO (ADD_VFD_TEST)
# Run test with different Virtual File Driver
@@ -317,8 +497,7 @@ ENDIF (HDF5_TEST_VFD)
IF (HDF5_BUILD_GENERATORS AND NOT BUILD_SHARED_LIBS)
MACRO (ADD_H5_GENERATOR genfile)
ADD_EXECUTABLE (${genfile} ${HDF5_TEST_SOURCE_DIR}/${genfile}.c)
- H5_NAMING (${genfile})
- TARGET_WIN_PROPERTIES (${genfile})
+ H5_NAMING (${genfile} ${LIB_TYPE})
TARGET_LINK_LIBRARIES (${genfile} ${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET})
ENDMACRO (ADD_H5_GENERATOR genfile)
@@ -345,7 +524,7 @@ IF (HDF5_BUILD_GENERATORS AND NOT BUILD_SHARED_LIBS)
FOREACH (gen ${H5_GENERATORS})
ADD_EXECUTABLE (${gen} ${HDF5_TEST_SOURCE_DIR}/${gen}.c)
- H5_NAMING (${gen})
+ H5_NAMING (${gen} ${LIB_TYPE})
TARGET_LINK_LIBRARIES (${gen} ${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET})
ENDFOREACH (gen ${H5_GENERATORS})
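Note: the CMake changes above drive the new links_env test through runTest.cmake with TEST_ENV_VAR=HDF5_EXT_PREFIX and TEST_ENV_VALUE=.:tmp; the links_env.c source itself is not part of this diff. As a rough, hypothetical illustration of the mechanism that test exercises (file and object names here are invented, and in the real suite the variable is exported by the test script or CTest wrapper rather than by the program itself), an external link that stores only a bare file name can be resolved through the HDF5_EXT_PREFIX search path:

/* Hypothetical sketch -- not the links_env.c added by this commit */
#include <stdlib.h>
#include "hdf5.h"

int
main(void)
{
    hid_t tfid, mfid, gid;

    /* Create the link target in a subdirectory (assumes ./tmp already exists) */
    if((tfid = H5Fcreate("tmp/extlinks_env_target.h5", H5F_ACC_TRUNC,
            H5P_DEFAULT, H5P_DEFAULT)) < 0) return 1;
    if((gid = H5Gcreate2(tfid, "group", H5P_DEFAULT, H5P_DEFAULT,
            H5P_DEFAULT)) < 0) return 1;
    H5Gclose(gid);
    H5Fclose(tfid);

    /* The main file records an external link with only the bare target name */
    if((mfid = H5Fcreate("extlinks_env_main.h5", H5F_ACC_TRUNC,
            H5P_DEFAULT, H5P_DEFAULT)) < 0) return 1;
    if(H5Lcreate_external("extlinks_env_target.h5", "group", mfid, "ext_link",
            H5P_DEFAULT, H5P_DEFAULT) < 0) return 1;

    /* With the prefix set, link traversal searches "." and then "tmp" */
    setenv("HDF5_EXT_PREFIX", ".:tmp", 1);
    if((gid = H5Gopen2(mfid, "ext_link", H5P_DEFAULT)) < 0) return 1;

    H5Gclose(gid);
    H5Fclose(mfid);
    return 0;
}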
diff --git a/test/Makefile.am b/test/Makefile.am
index cef0165..06c56f8 100644
--- a/test/Makefile.am
+++ b/test/Makefile.am
@@ -24,9 +24,9 @@ include $(top_srcdir)/config/commence.am
INCLUDES=-I$(top_srcdir)/src -I$(top_builddir)/src
# Test script for error_test and err_compat
-TEST_SCRIPT = testerror.sh testlibinfo.sh testcheck_version.sh
+TEST_SCRIPT = testerror.sh testlibinfo.sh testcheck_version.sh testlinks_env.sh
check_SCRIPTS = $(TEST_SCRIPT)
-SCRIPT_DEPEND = error_test$(EXEEXT) err_compat$(EXEEXT)
+SCRIPT_DEPEND = error_test$(EXEEXT) err_compat$(EXEEXT) links_env$(EXEEXT)
# These are our main targets. They should be listed in the order to be
@@ -36,8 +36,8 @@ SCRIPT_DEPEND = error_test$(EXEEXT) err_compat$(EXEEXT)
# These tests (fheap, btree2) are under development and are not used by
# the library yet. Move them to the end so that their failure do not block
# other current library code tests.
-TEST_PROG=testhdf5 lheap ohdr stab gheap cache cache_api cache_tagging \
- pool hyperslab istore bittests dt_arith \
+TEST_PROG= testhdf5 lheap ohdr stab gheap cache cache_api cache_tagging \
+ pool accum hyperslab istore bittests dt_arith \
dtypes dsets cmpd_dset filter_fail extend external objcopy links unlink \
big mtime fillval mount flush1 flush2 app_ref enum \
set_extent ttsafe \
@@ -50,7 +50,7 @@ TEST_PROG=testhdf5 lheap ohdr stab gheap cache cache_api cache_tagging \
# 'make check' doesn't run them directly, so they are not included in TEST_PROG.
# Also build testmeta, which is used for timings test. It builds quickly,
# and this lets automake keep all its test programs in one place.
-check_PROGRAMS=$(TEST_PROG) error_test err_compat tcheck_version testmeta
+check_PROGRAMS=$(TEST_PROG) error_test err_compat tcheck_version testmeta links_env
# These programs generate test files for the tests. They don't need to be
@@ -104,7 +104,7 @@ flush2.chkexe_: flush1.chkexe_
# specifying a file prefix or low-level driver. Changing the file
# prefix or low-level driver with environment variables will influence
# the temporary file name in ways that the makefile is not aware of.
-CHECK_CLEANFILES+=cmpd_dset.h5 compact_dataset.h5 dataset.h5 dset_offset.h5 \
+CHECK_CLEANFILES+=accum.h5 cmpd_dset.h5 compact_dataset.h5 dataset.h5 dset_offset.h5 \
max_compact_dataset.h5 simple.h5 set_local.h5 random_chunks.h5 \
huge_chunks.h5 chunk_cache.h5 big_chunk.h5 chunk_expand.h5 \
copy_dcpl_newfile.h5 extend.h5 istore.h5 extlinks*.h5 frspace.h5 links*.h5 \
@@ -134,6 +134,6 @@ testhdf5_SOURCES=testhdf5.c tarray.c tattr.c tchecksum.c tconfig.c tfile.c \
tvlstr.c tvltypes.c
# Temporary files.
-DISTCLEANFILES=testerror.sh testlibinfo.sh testcheck_version.sh
+DISTCLEANFILES=testerror.sh testlibinfo.sh testcheck_version.sh testlinks_env.sh
include $(top_srcdir)/config/conclude.am
diff --git a/test/Makefile.in b/test/Makefile.in
index 493a684..a734045 100644
--- a/test/Makefile.in
+++ b/test/Makefile.in
@@ -55,10 +55,11 @@ host_triplet = @host@
DIST_COMMON = $(srcdir)/H5srcdir_str.h.in $(srcdir)/Makefile.am \
$(srcdir)/Makefile.in $(srcdir)/testcheck_version.sh.in \
$(srcdir)/testerror.sh.in $(srcdir)/testlibinfo.sh.in \
- $(top_srcdir)/config/commence.am \
+ $(srcdir)/testlinks_env.sh.in $(top_srcdir)/config/commence.am \
$(top_srcdir)/config/conclude.am COPYING
check_PROGRAMS = $(am__EXEEXT_1) error_test$(EXEEXT) \
- err_compat$(EXEEXT) tcheck_version$(EXEEXT) testmeta$(EXEEXT)
+ err_compat$(EXEEXT) tcheck_version$(EXEEXT) testmeta$(EXEEXT) \
+ links_env$(EXEEXT)
@BUILD_ALL_CONDITIONAL_TRUE@noinst_PROGRAMS = $(am__EXEEXT_2)
TESTS = $(check_PROGRAMS) $(check_SCRIPTS)
subdir = test
@@ -69,7 +70,7 @@ am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
mkinstalldirs = $(SHELL) $(top_srcdir)/bin/mkinstalldirs
CONFIG_HEADER = $(top_builddir)/src/H5config.h
CONFIG_CLEAN_FILES = testcheck_version.sh testerror.sh H5srcdir_str.h \
- testlibinfo.sh
+ testlibinfo.sh testlinks_env.sh
CONFIG_CLEAN_VPATH_FILES =
LTLIBRARIES = $(noinst_LTLIBRARIES)
libh5test_la_LIBADD =
@@ -77,18 +78,18 @@ am_libh5test_la_OBJECTS = h5test.lo testframe.lo cache_common.lo
libh5test_la_OBJECTS = $(am_libh5test_la_OBJECTS)
am__EXEEXT_1 = testhdf5$(EXEEXT) lheap$(EXEEXT) ohdr$(EXEEXT) \
stab$(EXEEXT) gheap$(EXEEXT) cache$(EXEEXT) cache_api$(EXEEXT) \
- cache_tagging$(EXEEXT) pool$(EXEEXT) hyperslab$(EXEEXT) \
- istore$(EXEEXT) bittests$(EXEEXT) dt_arith$(EXEEXT) \
- dtypes$(EXEEXT) dsets$(EXEEXT) cmpd_dset$(EXEEXT) \
- filter_fail$(EXEEXT) extend$(EXEEXT) external$(EXEEXT) \
- objcopy$(EXEEXT) links$(EXEEXT) unlink$(EXEEXT) big$(EXEEXT) \
- mtime$(EXEEXT) fillval$(EXEEXT) mount$(EXEEXT) flush1$(EXEEXT) \
- flush2$(EXEEXT) app_ref$(EXEEXT) enum$(EXEEXT) \
- set_extent$(EXEEXT) ttsafe$(EXEEXT) getname$(EXEEXT) \
- vfd$(EXEEXT) ntypes$(EXEEXT) dangle$(EXEEXT) \
- dtransform$(EXEEXT) reserved$(EXEEXT) cross_read$(EXEEXT) \
- freespace$(EXEEXT) mf$(EXEEXT) farray$(EXEEXT) earray$(EXEEXT) \
- btree2$(EXEEXT) fheap$(EXEEXT)
+ cache_tagging$(EXEEXT) pool$(EXEEXT) accum$(EXEEXT) \
+ hyperslab$(EXEEXT) istore$(EXEEXT) bittests$(EXEEXT) \
+ dt_arith$(EXEEXT) dtypes$(EXEEXT) dsets$(EXEEXT) \
+ cmpd_dset$(EXEEXT) filter_fail$(EXEEXT) extend$(EXEEXT) \
+ external$(EXEEXT) objcopy$(EXEEXT) links$(EXEEXT) \
+ unlink$(EXEEXT) big$(EXEEXT) mtime$(EXEEXT) fillval$(EXEEXT) \
+ mount$(EXEEXT) flush1$(EXEEXT) flush2$(EXEEXT) \
+ app_ref$(EXEEXT) enum$(EXEEXT) set_extent$(EXEEXT) \
+ ttsafe$(EXEEXT) getname$(EXEEXT) vfd$(EXEEXT) ntypes$(EXEEXT) \
+ dangle$(EXEEXT) dtransform$(EXEEXT) reserved$(EXEEXT) \
+ cross_read$(EXEEXT) freespace$(EXEEXT) mf$(EXEEXT) \
+ farray$(EXEEXT) earray$(EXEEXT) btree2$(EXEEXT) fheap$(EXEEXT)
am__EXEEXT_2 = gen_bad_ohdr$(EXEEXT) gen_bogus$(EXEEXT) \
gen_cross$(EXEEXT) gen_deflate$(EXEEXT) gen_filters$(EXEEXT) \
gen_new_array$(EXEEXT) gen_new_fill$(EXEEXT) \
@@ -98,6 +99,10 @@ am__EXEEXT_2 = gen_bad_ohdr$(EXEEXT) gen_bogus$(EXEEXT) \
space_overflow$(EXEEXT) gen_filespace$(EXEEXT) \
gen_specmetaread$(EXEEXT) gen_sizes_lheap$(EXEEXT)
PROGRAMS = $(noinst_PROGRAMS)
+accum_SOURCES = accum.c
+accum_OBJECTS = accum.$(OBJEXT)
+accum_LDADD = $(LDADD)
+accum_DEPENDENCIES = libh5test.la $(LIBHDF5)
app_ref_SOURCES = app_ref.c
app_ref_OBJECTS = app_ref.$(OBJEXT)
app_ref_LDADD = $(LDADD)
@@ -294,6 +299,10 @@ links_SOURCES = links.c
links_OBJECTS = links.$(OBJEXT)
links_LDADD = $(LDADD)
links_DEPENDENCIES = libh5test.la $(LIBHDF5)
+links_env_SOURCES = links_env.c
+links_env_OBJECTS = links_env.$(OBJEXT)
+links_env_LDADD = $(LDADD)
+links_env_DEPENDENCIES = libh5test.la $(LIBHDF5)
mf_SOURCES = mf.c
mf_OBJECTS = mf.$(OBJEXT)
mf_LDADD = $(LDADD)
@@ -385,21 +394,7 @@ CCLD = $(CC)
LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
$(LDFLAGS) -o $@
-SOURCES = $(libh5test_la_SOURCES) app_ref.c big.c bittests.c btree2.c \
- cache.c cache_api.c cache_tagging.c cmpd_dset.c cross_read.c \
- dangle.c dsets.c dt_arith.c dtransform.c dtypes.c earray.c \
- enum.c err_compat.c error_test.c extend.c external.c farray.c \
- fheap.c fillval.c filter_fail.c flush1.c flush2.c freespace.c \
- gen_bad_ohdr.c gen_bogus.c gen_cross.c gen_deflate.c \
- gen_filespace.c gen_filters.c gen_new_array.c gen_new_fill.c \
- gen_new_group.c gen_new_mtime.c gen_new_super.c \
- gen_noencoder.c gen_nullspace.c gen_sizes_lheap.c \
- gen_specmetaread.c gen_udlinks.c getname.c gheap.c hyperslab.c \
- istore.c lheap.c links.c mf.c mount.c mtime.c ntypes.c \
- objcopy.c ohdr.c pool.c reserved.c set_extent.c \
- space_overflow.c stab.c tcheck_version.c $(testhdf5_SOURCES) \
- testmeta.c $(ttsafe_SOURCES) unlink.c vfd.c
-DIST_SOURCES = $(libh5test_la_SOURCES) app_ref.c big.c bittests.c \
+SOURCES = $(libh5test_la_SOURCES) accum.c app_ref.c big.c bittests.c \
btree2.c cache.c cache_api.c cache_tagging.c cmpd_dset.c \
cross_read.c dangle.c dsets.c dt_arith.c dtransform.c dtypes.c \
earray.c enum.c err_compat.c error_test.c extend.c external.c \
@@ -409,10 +404,25 @@ DIST_SOURCES = $(libh5test_la_SOURCES) app_ref.c big.c bittests.c \
gen_new_fill.c gen_new_group.c gen_new_mtime.c gen_new_super.c \
gen_noencoder.c gen_nullspace.c gen_sizes_lheap.c \
gen_specmetaread.c gen_udlinks.c getname.c gheap.c hyperslab.c \
- istore.c lheap.c links.c mf.c mount.c mtime.c ntypes.c \
- objcopy.c ohdr.c pool.c reserved.c set_extent.c \
+ istore.c lheap.c links.c links_env.c mf.c mount.c mtime.c \
+ ntypes.c objcopy.c ohdr.c pool.c reserved.c set_extent.c \
space_overflow.c stab.c tcheck_version.c $(testhdf5_SOURCES) \
testmeta.c $(ttsafe_SOURCES) unlink.c vfd.c
+DIST_SOURCES = $(libh5test_la_SOURCES) accum.c app_ref.c big.c \
+ bittests.c btree2.c cache.c cache_api.c cache_tagging.c \
+ cmpd_dset.c cross_read.c dangle.c dsets.c dt_arith.c \
+ dtransform.c dtypes.c earray.c enum.c err_compat.c \
+ error_test.c extend.c external.c farray.c fheap.c fillval.c \
+ filter_fail.c flush1.c flush2.c freespace.c gen_bad_ohdr.c \
+ gen_bogus.c gen_cross.c gen_deflate.c gen_filespace.c \
+ gen_filters.c gen_new_array.c gen_new_fill.c gen_new_group.c \
+ gen_new_mtime.c gen_new_super.c gen_noencoder.c \
+ gen_nullspace.c gen_sizes_lheap.c gen_specmetaread.c \
+ gen_udlinks.c getname.c gheap.c hyperslab.c istore.c lheap.c \
+ links.c links_env.c mf.c mount.c mtime.c ntypes.c objcopy.c \
+ ohdr.c pool.c reserved.c set_extent.c space_overflow.c stab.c \
+ tcheck_version.c $(testhdf5_SOURCES) testmeta.c \
+ $(ttsafe_SOURCES) unlink.c vfd.c
ETAGS = etags
CTAGS = ctags
am__tty_colors = \
@@ -464,6 +474,7 @@ DEFS = @DEFS@
DEPDIR = @DEPDIR@
DEPRECATED_SYMBOLS = @DEPRECATED_SYMBOLS@
DIRECT_VFD = @DIRECT_VFD@
+DLLTOOL = @DLLTOOL@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
DYNAMIC_DIRS = @DYNAMIC_DIRS@
@@ -525,6 +536,7 @@ LTLIBOBJS = @LTLIBOBJS@
LT_STATIC_EXEC = @LT_STATIC_EXEC@
MAINT = @MAINT@
MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
MKDIR_P = @MKDIR_P@
MPE = @MPE@
MPI_GET_SIZE = @MPI_GET_SIZE@
@@ -581,6 +593,7 @@ abs_builddir = @abs_builddir@
abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
@@ -692,7 +705,7 @@ TRACE = perl $(top_srcdir)/bin/trace
# specifying a file prefix or low-level driver. Changing the file
# prefix or low-level driver with environment variables will influence
# the temporary file name in ways that the makefile is not aware of.
-CHECK_CLEANFILES = *.chkexe *.chklog *.clog cmpd_dset.h5 \
+CHECK_CLEANFILES = *.chkexe *.chklog *.clog accum.h5 cmpd_dset.h5 \
compact_dataset.h5 dataset.h5 dset_offset.h5 \
max_compact_dataset.h5 simple.h5 set_local.h5 random_chunks.h5 \
huge_chunks.h5 chunk_cache.h5 big_chunk.h5 chunk_expand.h5 \
@@ -718,9 +731,9 @@ CHECK_CLEANFILES = *.chkexe *.chklog *.clog cmpd_dset.h5 \
INCLUDES = -I$(top_srcdir)/src -I$(top_builddir)/src
# Test script for error_test and err_compat
-TEST_SCRIPT = testerror.sh testlibinfo.sh testcheck_version.sh
+TEST_SCRIPT = testerror.sh testlibinfo.sh testcheck_version.sh testlinks_env.sh
check_SCRIPTS = $(TEST_SCRIPT)
-SCRIPT_DEPEND = error_test$(EXEEXT) err_compat$(EXEEXT)
+SCRIPT_DEPEND = error_test$(EXEEXT) err_compat$(EXEEXT) links_env$(EXEEXT)
# These are our main targets. They should be listed in the order to be
# executed, generally most specific tests to least specific tests.
@@ -730,7 +743,7 @@ SCRIPT_DEPEND = error_test$(EXEEXT) err_compat$(EXEEXT)
# the library yet. Move them to the end so that their failure do not block
# other current library code tests.
TEST_PROG = testhdf5 lheap ohdr stab gheap cache cache_api cache_tagging \
- pool hyperslab istore bittests dt_arith \
+ pool accum hyperslab istore bittests dt_arith \
dtypes dsets cmpd_dset filter_fail extend external objcopy links unlink \
big mtime fillval mount flush1 flush2 app_ref enum \
set_extent ttsafe \
@@ -773,7 +786,7 @@ testhdf5_SOURCES = testhdf5.c tarray.c tattr.c tchecksum.c tconfig.c tfile.c \
# Temporary files.
-DISTCLEANFILES = testerror.sh testlibinfo.sh testcheck_version.sh
+DISTCLEANFILES = testerror.sh testlibinfo.sh testcheck_version.sh testlinks_env.sh
# Automake needs to be taught how to build lib, progs, and tests targets.
# These will be filled in automatically for the most part (e.g.,
@@ -832,6 +845,8 @@ H5srcdir_str.h: $(top_builddir)/config.status $(srcdir)/H5srcdir_str.h.in
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@
testlibinfo.sh: $(top_builddir)/config.status $(srcdir)/testlibinfo.sh.in
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@
+testlinks_env.sh: $(top_builddir)/config.status $(srcdir)/testlinks_env.sh.in
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@
clean-noinstLTLIBRARIES:
-test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
@@ -861,6 +876,9 @@ clean-noinstPROGRAMS:
list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
echo " rm -f" $$list; \
rm -f $$list
+accum$(EXEEXT): $(accum_OBJECTS) $(accum_DEPENDENCIES)
+ @rm -f accum$(EXEEXT)
+ $(LINK) $(accum_OBJECTS) $(accum_LDADD) $(LIBS)
app_ref$(EXEEXT): $(app_ref_OBJECTS) $(app_ref_DEPENDENCIES)
@rm -f app_ref$(EXEEXT)
$(LINK) $(app_ref_OBJECTS) $(app_ref_LDADD) $(LIBS)
@@ -1008,6 +1026,9 @@ lheap$(EXEEXT): $(lheap_OBJECTS) $(lheap_DEPENDENCIES)
links$(EXEEXT): $(links_OBJECTS) $(links_DEPENDENCIES)
@rm -f links$(EXEEXT)
$(LINK) $(links_OBJECTS) $(links_LDADD) $(LIBS)
+links_env$(EXEEXT): $(links_env_OBJECTS) $(links_env_DEPENDENCIES)
+ @rm -f links_env$(EXEEXT)
+ $(LINK) $(links_env_OBJECTS) $(links_env_LDADD) $(LIBS)
mf$(EXEEXT): $(mf_OBJECTS) $(mf_DEPENDENCIES)
@rm -f mf$(EXEEXT)
$(LINK) $(mf_OBJECTS) $(mf_LDADD) $(LIBS)
@@ -1066,6 +1087,7 @@ mostlyclean-compile:
distclean-compile:
-rm -f *.tab.c
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accum.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/app_ref.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/big.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bittests.Po@am__quote@
@@ -1117,6 +1139,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/istore.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lheap.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/links.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/links_env.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mf.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mount.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mtime.Po@am__quote@
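Note: the new test/accum.c below is long, but nearly every test function in it repeats one idiom: write a pattern through the metadata accumulator, read it back, and memcmp the result, using thin macro wrappers around the internal H5F_block_write/H5F_block_read routines. A condensed sketch of that idiom, distilled from the file that follows (it only builds inside the HDF5 source tree because of the package-private headers, and write_read_verify is an invented helper name, not a function from the diff):

/* Condensed sketch of the write/read/verify pattern used throughout accum.c */
#define H5F_PACKAGE             /* allow access to the H5F package headers */
#include "h5test.h"
#include "H5Fpkg.h"

/* Same wrappers as in accum.c: route I/O through the metadata accumulator */
#define accum_write(a,s,b) H5F_block_write(f, H5FD_MEM_DEFAULT, (haddr_t)(a), \
        (size_t)(s), H5P_DATASET_XFER_DEFAULT, (b))
#define accum_read(a,s,b)  H5F_block_read(f, H5FD_MEM_DEFAULT, (haddr_t)(a), \
        (size_t)(s), H5P_DATASET_XFER_DEFAULT, (b))

static H5F_t *f;                /* set from H5I_object(fid), as in main() below */

/* Invented helper: write nelem ints at addr, read them back, and compare */
static unsigned
write_read_verify(haddr_t addr, size_t nelem)
{
    int      *wbuf = (int *)HDmalloc(nelem * sizeof(int));
    int      *rbuf = (int *)HDcalloc(nelem, sizeof(int));
    size_t    i;
    unsigned  nerr = 0;

    for(i = 0; i < nelem; i++)
        wbuf[i] = (int)i + 1;

    if(accum_write(addr, nelem * sizeof(int), wbuf) < 0 ||
            accum_read(addr, nelem * sizeof(int), rbuf) < 0 ||
            HDmemcmp(wbuf, rbuf, nelem * sizeof(int)) != 0)
        nerr = 1;               /* data did not survive the round trip */

    HDfree(wbuf);
    HDfree(rbuf);
    return nerr;
}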
diff --git a/test/accum.c b/test/accum.c
new file mode 100644
index 0000000..c5f6610
--- /dev/null
+++ b/test/accum.c
@@ -0,0 +1,1809 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Programmer: Mike McGreevy
+ * October 7, 2010
+ */
+#include "h5test.h"
+
+#define H5F_PACKAGE
+#include "H5Fpkg.h"
+#include "H5FDprivate.h"
+#include "H5Iprivate.h"
+
+/* Filename */
+#define FILENAME "accum.h5"
+
+/* "big" I/O test values */
+#define BIG_BUF_SIZE (6 * 1024 * 1024)
+
+/* Random I/O test values */
+#define RANDOM_BUF_SIZE (1 * 1024 * 1024)
+#define MAX_RANDOM_SEGMENTS (5 * 1024)
+#define RAND_SEG_LEN (1024)
+#define RANDOM_BASE_OFF (1024 * 1024)
+
+/* Make file global to all tests */
+H5F_t * f = NULL;
+
+/* Function Prototypes */
+unsigned test_write_read(void);
+unsigned test_write_read_nonacc_front(void);
+unsigned test_write_read_nonacc_end(void);
+unsigned test_accum_overlap(void);
+unsigned test_accum_overlap_clean(void);
+unsigned test_accum_overlap_size(void);
+unsigned test_accum_non_overlap_size(void);
+unsigned test_accum_adjust(void);
+unsigned test_read_after(void);
+unsigned test_free(void);
+unsigned test_big(void);
+unsigned test_random_write(void);
+
+/* Helper Function Prototypes */
+void accum_printf(void);
+
+/* Private Test H5Faccum Function Wrappers */
+#define accum_write(a,s,b) H5F_block_write(f, H5FD_MEM_DEFAULT, (haddr_t)(a), (size_t)(s), H5P_DATASET_XFER_DEFAULT, (b))
+#define accum_read(a,s,b) H5F_block_read(f, H5FD_MEM_DEFAULT, (haddr_t)(a), (size_t)(s), H5P_DATASET_XFER_DEFAULT, (b))
+#define accum_free(a,s) H5F_accum_free(f, H5P_DATASET_XFER_DEFAULT, H5FD_MEM_DEFAULT, (haddr_t)(a), (hsize_t)(s))
+#define accum_flush() H5F_accum_flush(f, H5P_DATASET_XFER_DEFAULT)
+#define accum_reset() H5F_accum_reset(f, H5P_DATASET_XFER_DEFAULT, TRUE)
+
+/* ================= */
+/* Main Test Routine */
+/* ================= */
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: Test the metadata accumulator code
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Mike McGreevy
+ * October 7, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(void)
+{
+ unsigned nerrors = 0; /* track errors */
+ hid_t fid = -1;
+
+ /* Test Setup */
+ puts("Testing the metadata accumulator");
+
+ /* Create a test file */
+ if((fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
+
+ /* Get H5F_t * to internal file structure */
+ if(NULL == (f = (H5F_t *)H5I_object(fid))) FAIL_STACK_ERROR
+
+ /* We'll be writing lots of garbage data, so extend the
+ file a ways. 10MB should do. */
+ if(H5FD_set_eoa(f->shared->lf, H5FD_MEM_DEFAULT, (haddr_t)(1024*1024*10)) < 0) FAIL_STACK_ERROR
+
+ /* Reset metadata accumulator for the file */
+ if(accum_reset() < 0) FAIL_STACK_ERROR
+
+ /* Test Functions */
+ nerrors += test_write_read();
+ nerrors += test_write_read_nonacc_front();
+ nerrors += test_write_read_nonacc_end();
+ nerrors += test_accum_overlap();
+ nerrors += test_accum_overlap_clean();
+ nerrors += test_accum_overlap_size();
+ nerrors += test_accum_non_overlap_size();
+ nerrors += test_accum_adjust();
+ nerrors += test_read_after();
+ nerrors += test_free();
+ nerrors += test_big();
+ nerrors += test_random_write();
+
+ /* End of test code, close and delete file */
+ if(H5Fclose(fid) < 0) TEST_ERROR
+ HDremove(FILENAME);
+
+ if(nerrors)
+ goto error;
+ puts("All metadata accumulator tests passed.");
+
+ return 0;
+
+error:
+ puts("*** TESTS FAILED ***");
+ return 1;
+} /* end main() */
+
+/* ============================= */
+/* Individual Unit Test Routines */
+/* ============================= */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_write_read
+ *
+ * Purpose: Simple test to write to then read from metadata accumulator.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Mike McGreevy
+ * October 7, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+unsigned
+test_write_read(void)
+{
+ int i = 0;
+ int *write_buf, *read_buf;
+
+ TESTING("simple write/read to/from metadata accumulator");
+
+ /* Allocate buffers */
+ write_buf = (int *)HDmalloc(1024 * sizeof(int));
+ HDassert(write_buf);
+ read_buf = (int *)HDcalloc(1024, sizeof(int));
+ HDassert(read_buf);
+
+ /* Fill buffer with data, zero out read buffer */
+ for(i = 0; i < 1024; i++)
+ write_buf[i] = i + 1;
+
+ /* Do a simple write/read/verify of data */
+ /* Write 1KB at Address 0 */
+ if(accum_write(0, 1024, write_buf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(0, 1024, read_buf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(write_buf, read_buf, 1024) != 0) TEST_ERROR;
+
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+ PASSED();
+
+ /* Release memory */
+ HDfree(write_buf);
+ HDfree(read_buf);
+
+ return 0;
+
+error:
+ /* Release memory */
+ HDfree(write_buf);
+ HDfree(read_buf);
+
+ return 1;
+} /* test_write_read */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_write_read_nonacc_front
+ *
+ * Purpose: Simple test to write to then read from before metadata accumulator.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Allen Byrne
+ * October 8, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+unsigned
+test_write_read_nonacc_front(void)
+{
+ int i = 0;
+ int *write_buf, *read_buf;
+
+ TESTING("simple write/read to/from before metadata accumulator");
+
+ /* Allocate buffers */
+ write_buf = (int *)HDmalloc(2048 * sizeof(int));
+ HDassert(write_buf);
+ read_buf = (int *)HDcalloc(2048, sizeof(int));
+ HDassert(read_buf);
+
+ /* Fill buffer with data, zero out read buffer */
+ for(i = 0; i < 2048; i++)
+ write_buf[i] = i + 1;
+
+ /* Do a simple write/read/verify of data */
+ /* Write 1KB at Address 0 */
+ if(accum_write(0, 1024, write_buf) < 0) FAIL_STACK_ERROR;
+ if(accum_flush() < 0) FAIL_STACK_ERROR;
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+ if(accum_write(1024, 1024, write_buf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(0, 1024, read_buf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(write_buf, read_buf, 1024) != 0) TEST_ERROR;
+
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+ PASSED();
+
+ /* Release memory */
+ HDfree(write_buf);
+ HDfree(read_buf);
+
+ return 0;
+
+error:
+ /* Release memory */
+ HDfree(write_buf);
+ HDfree(read_buf);
+
+ return 1;
+} /* test_write_read_nonacc_front */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_write_read_nonacc_end
+ *
+ * Purpose: Simple test to write to then read from after metadata accumulator.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Allen Byrne
+ * October 8, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+unsigned
+test_write_read_nonacc_end(void)
+{
+ int i = 0;
+ int *write_buf, *read_buf;
+
+ TESTING("simple write/read to/from after metadata accumulator");
+
+ /* Allocate buffers */
+ write_buf = (int *)HDmalloc(2048 * sizeof(int));
+ HDassert(write_buf);
+ read_buf = (int *)HDcalloc(2048, sizeof(int));
+ HDassert(read_buf);
+
+ /* Fill buffer with data, zero out read buffer */
+ for(i = 0; i < 2048; i++)
+ write_buf[i] = i + 1;
+
+ /* Do a simple write/read/verify of data */
+ /* Write 1KB at Address 1024 */
+ if(accum_write(1024, 1024, write_buf) < 0) FAIL_STACK_ERROR;
+ if(accum_flush() < 0) FAIL_STACK_ERROR;
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+ if(accum_write(0, 1024, write_buf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(1024, 1024, read_buf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(write_buf, read_buf, 1024) != 0) TEST_ERROR;
+
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+ PASSED();
+
+ /* Release memory */
+ HDfree(write_buf);
+ HDfree(read_buf);
+
+ return 0;
+
+error:
+ /* Release memory */
+ HDfree(write_buf);
+ HDfree(read_buf);
+
+ return 1;
+} /* test_write_read_nonacc_end */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_free
+ *
+ * Purpose: Simple test to free metadata accumulator.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Raymond Lu
+ * October 8, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+unsigned
+test_free(void)
+{
+ int i = 0;
+ int32_t *wbuf = NULL;
+ int32_t *rbuf = NULL;
+ int32_t *expect = NULL;
+
+ TESTING("simple freeing metadata accumulator");
+
+ /* Write and free the whole accumulator. */
+ wbuf = (int32_t *)HDmalloc(256 * sizeof(int32_t));
+ HDassert(wbuf);
+ rbuf = (int32_t *)HDmalloc(256 * sizeof(int32_t));
+ HDassert(rbuf);
+ expect = (int32_t *)HDmalloc(256 * sizeof(int32_t));
+ HDassert(expect);
+
+ /* Fill buffer with data */
+ for(i = 0; i < 256; i++)
+ wbuf[i] = (int32_t)(i + 1);
+
+ if(accum_write(0, 256 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+
+ if(accum_free(0, 256 * sizeof(int32_t)) < 0) FAIL_STACK_ERROR;
+
+ /* Free an empty accumulator */
+ if(accum_free(0, 256 * 1024 * sizeof(int32_t)) < 0) FAIL_STACK_ERROR;
+
+ /* Write second quarter of the accumulator */
+ if(accum_write(64 * sizeof(int32_t), 64 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Free the second quarter of the accumulator, the requested area
+ * is bigger than the data region on the right side. */
+ if(accum_free(64 * sizeof(int32_t), 65 * sizeof(int32_t)) < 0) FAIL_STACK_ERROR;
+
+
+ /* Write half of the accumulator. */
+ if(accum_write(0, 128 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Free the first block of 4B */
+ if(accum_free(0, sizeof(int32_t)) < 0) FAIL_STACK_ERROR;
+
+ /* Check that the accumulator still contains the correct data */
+ if(accum_read(1 * sizeof(int32_t), 127 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf + 1, rbuf, 127 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Free the block of 4B at 127*4B */
+ if(accum_free(127 * sizeof(int32_t), sizeof(int32_t)) < 0) FAIL_STACK_ERROR;
+
+ /* Check that the accumulator still contains the correct data */
+ if(accum_read(1 * sizeof(int32_t), 126 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf + 1, rbuf, 126 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Free the block of 4B at 2*4B */
+ if(accum_free(2 * sizeof(int32_t), sizeof(int32_t)) < 0) FAIL_STACK_ERROR;
+
+ /* Check that the accumulator still contains the correct data */
+ if(accum_read(1 * sizeof(int32_t), 1 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf + 1, rbuf, 1 * sizeof(int32_t)) != 0) TEST_ERROR;
+ if(accum_read(3 * sizeof(int32_t), 124 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf + 3, rbuf, 124 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+
+ /* Test freeing section that overlaps the start of the accumulator and is
+ * entirely before dirty section */
+ if(accum_write(64 * sizeof(int32_t), 128 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ HDmemcpy(expect + 64, wbuf, 128 * sizeof(int32_t));
+ if(accum_flush() < 0) FAIL_STACK_ERROR;
+ if(accum_write(68 * sizeof(int32_t), 4 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ HDmemcpy(expect + 68, wbuf, 4 * sizeof(int32_t));
+ if(accum_free(62 * sizeof(int32_t), 4 * sizeof(int32_t)) < 0) FAIL_STACK_ERROR;
+
+ /* Check that the accumulator still contains the correct data */
+ if(accum_read(66 * sizeof(int32_t), 126 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(expect + 66, rbuf, 126 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+
+ /* Test freeing section that overlaps the start of the accumulator and
+ * completely contains dirty section */
+ if(accum_write(64 * sizeof(int32_t), 128 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ HDmemcpy(expect + 64, wbuf, 128 * sizeof(int32_t));
+ if(accum_flush() < 0) FAIL_STACK_ERROR;
+ if(accum_write(68 * sizeof(int32_t), 4 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ HDmemcpy(expect + 68, wbuf, 4 * sizeof(int32_t));
+ if(accum_free(62 * sizeof(int32_t), 16 * sizeof(int32_t)) < 0) FAIL_STACK_ERROR;
+
+ /* Check that the accumulator still contains the correct data */
+ if(accum_read(78 * sizeof(int32_t), 114 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(expect + 78, rbuf, 114 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+
+ /* Test freeing section completely contained in accumulator and is entirely
+ * before dirty section */
+ if(accum_write(64 * sizeof(int32_t), 128 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ HDmemcpy(expect + 64, wbuf, 128 * sizeof(int32_t));
+ if(accum_flush() < 0) FAIL_STACK_ERROR;
+ if(accum_write(72 * sizeof(int32_t), 4 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ HDmemcpy(expect + 72, wbuf, 4 * sizeof(int32_t));
+ if(accum_free(66 * sizeof(int32_t), 4 * sizeof(int32_t)) < 0) FAIL_STACK_ERROR;
+
+ /* Check that the accumulator still contains the correct data */
+ if(accum_read(70 * sizeof(int32_t), 122 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(expect + 70, rbuf, 122 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+
+ /* Test freeing section completely contained in accumulator, starts before
+ * dirty section, and ends in dirty section */
+ if(accum_write(64 * sizeof(int32_t), 128 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ HDmemcpy(expect + 64, wbuf, 128 * sizeof(int32_t));
+ if(accum_flush() < 0) FAIL_STACK_ERROR;
+ if(accum_write(72 * sizeof(int32_t), 4 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ HDmemcpy(expect + 72, wbuf, 4 * sizeof(int32_t));
+ if(accum_free(70 * sizeof(int32_t), 4 * sizeof(int32_t)) < 0) FAIL_STACK_ERROR;
+
+ /* Check that the accumulator still contains the correct data */
+ if(accum_read(74 * sizeof(int32_t), 118 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(expect + 74, rbuf, 118 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+
+ /* Test freeing section completely contained in accumulator and completely
+ * contains dirty section */
+ if(accum_write(64 * sizeof(int32_t), 128 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ HDmemcpy(expect + 64, wbuf, 128 * sizeof(int32_t));
+ if(accum_flush() < 0) FAIL_STACK_ERROR;
+ if(accum_write(72 * sizeof(int32_t), 4 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ HDmemcpy(expect + 72, wbuf, 4 * sizeof(int32_t));
+ if(accum_free(70 * sizeof(int32_t), 8 * sizeof(int32_t)) < 0) FAIL_STACK_ERROR;
+
+ /* Check that the accumulator still contains the correct data */
+ if(accum_read(78 * sizeof(int32_t), 114 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(expect + 78, rbuf, 114 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+
+ /* Test freeing section completely contained in accumulator, starts at start
+ * of dirty section, and ends in dirty section */
+ if(accum_write(64 * sizeof(int32_t), 128 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ HDmemcpy(expect + 64, wbuf, 128 * sizeof(int32_t));
+ if(accum_flush() < 0) FAIL_STACK_ERROR;
+ if(accum_write(72 * sizeof(int32_t), 8 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ HDmemcpy(expect + 72, wbuf, 8 * sizeof(int32_t));
+ if(accum_free(72 * sizeof(int32_t), 4 * sizeof(int32_t)) < 0) FAIL_STACK_ERROR;
+
+ /* Check that the accumulator still contains the correct data */
+ if(accum_read(76 * sizeof(int32_t), 116 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(expect + 76, rbuf, 116 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ HDfree(wbuf);
+ HDfree(rbuf);
+ HDfree(expect);
+
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ HDfree(wbuf);
+ HDfree(rbuf);
+ HDfree(expect);
+
+ return 1;
+} /* test_free */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_accum_overlap
+ *
+ * Purpose: This test will write a series of pieces of data
+ * to the accumulator with the goal of overlapping
+ * the writes in various different ways.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Mike McGreevy
+ * October 7, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+unsigned
+test_accum_overlap(void)
+{
+ int i = 0;
+ int32_t *wbuf, *rbuf;
+
+ TESTING("overlapping write to metadata accumulator");
+
+ /* Allocate buffers */
+ wbuf = (int32_t *)HDmalloc(4096 * sizeof(int32_t));
+ HDassert(wbuf);
+ rbuf = (int32_t *)HDcalloc(4096, sizeof(int32_t));
+ HDassert(rbuf);
+
+ /* Case 1: No metadata in accumulator */
+ /* Write 10 1's at address 40 */
+ /* @0:| 1111111111| */
+ /* Put some data in the accumulator initially */
+ for(i = 0; i < 10; i++)
+ wbuf[i] = 1;
+ if(accum_write(40, 10 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(40, 10 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 10 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Case 2: End of new piece aligns with start of accumulated data */
+ /* Write 5 2's at address 20 */
+ /* @0:| 222221111111111| */
+ for(i = 0; i < 5; i++)
+ wbuf[i] = 2;
+ if(accum_write(20, 5 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(20, 5 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 5 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Case 3: Start of new piece aligns with start of accumulated data */
+ /* Write 3 3's at address 20 */
+ /* @0:| 333221111111111| */
+ for(i = 0; i < 3; i++)
+ wbuf[i] = 3;
+ if(accum_write(20, 3 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(20, 3 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 3 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Case 4: New piece overlaps start of accumulated data */
+ /* Write 5 4's at address 8 */
+ /* @0:| 444443221111111111| */
+ for(i = 0; i < 5; i++)
+ wbuf[i] = 4;
+ if(accum_write(8, 5 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(8, 5 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 5 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Case 5: New piece completely within accumulated data */
+ /* Write 4 5's at address 48 */
+ /* @0:| 444443221155551111| */
+ for(i = 0; i < 4; i++)
+ wbuf[i] = 5;
+ if(accum_write(48, 4 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(48, 4 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 4 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Case 6: End of new piece aligns with end of accumulated data */
+ /* Write 3 6's at address 68 */
+ /* @0:| 444443221155551666| */
+ for(i = 0; i < 3; i++)
+ wbuf[i] = 6;
+ if(accum_write(68, 3 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(68, 3 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 3 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Case 7: New piece overlaps end of accumulated data */
+ /* Write 5 7's at address 76 */
+ /* @0:| 4444432211555516677777| */
+ for(i = 0; i < 5; i++)
+ wbuf[i] = 7;
+ if(accum_write(76, 5 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(76, 5 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 5 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Case 8: Start of new piece aligns with end of accumulated data */
+ /* Write 3 8's at address 96 */
+ /* @0:| 4444432211555516677777888| */
+ for(i = 0; i < 3; i++)
+ wbuf[i] = 8;
+ if(accum_write(96, 3 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(96, 3 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 3 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Set up expected data buffer and verify contents of
+ accumulator as constructed by cases 1-8, above */
+ for(i = 0; i < 5; i++)
+ wbuf[i] = 4;
+ for(i = 5; i < 6; i++)
+ wbuf[i] = 3;
+ for(i = 6; i < 8; i++)
+ wbuf[i] = 2;
+ for(i = 8; i < 10; i++)
+ wbuf[i] = 1;
+ for(i = 10; i < 14; i++)
+ wbuf[i] = 5;
+ for(i = 14; i < 15; i++)
+ wbuf[i] = 1;
+ for(i = 15; i < 17; i++)
+ wbuf[i] = 6;
+ for(i = 17; i < 22; i++)
+ wbuf[i] = 7;
+ for(i = 22; i < 25; i++)
+ wbuf[i] = 8;
+ if(accum_read(8, 25 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 25 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Case 9: New piece completely before accumulated data */
+ /* Write 1 9 at address 0 */
+ /* @0:|9 4444432211555516677777888| */
+ for(i = 0; i < 1; i++)
+ wbuf[i] = 9;
+ if(accum_write(0, 1 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(0, 1 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 1 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Case 10: New piece completely after accumulated data */
+ /* Write 4 3's at address 116 */
+ /* @0:|9 4444432211555516677777888 3333| */
+ for(i = 0; i < 4; i++)
+ wbuf[i] = 3;
+ if(accum_write(116, 4 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(116, 4 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 4 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Case 11: New piece completely overlaps accumulated data */
+ /* Write 6 4's at address 112 */
+ /* @0:|9 4444432211555516677777888 444444| */
+ for(i = 0; i < 6; i++)
+ wbuf[i] = 4;
+ if(accum_write(112, 6 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(112, 6 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 6 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+ PASSED();
+
+ /* Release memory */
+ HDfree(wbuf);
+ HDfree(rbuf);
+
+ return 0;
+
+error:
+ /* Release memory */
+ HDfree(wbuf);
+ HDfree(rbuf);
+
+ return 1;
+} /* test_accum_overlap */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_accum_overlap_clean
+ *
+ * Purpose: This test will write a series of pieces of data
+ * to the accumulator with the goal of overlapping
+ * the writes in various different ways, with clean
+ * areas in the accumulator.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Neil Fortner
+ * October 8, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+unsigned
+test_accum_overlap_clean(void)
+{
+ int i = 0;
+ int32_t *wbuf, *rbuf;
+
+ TESTING("overlapping write to partially clean metadata accumulator");
+
+ /* Allocate buffers */
+ wbuf = (int32_t *)HDmalloc(4096 * sizeof(int32_t));
+ HDassert(wbuf);
+ rbuf = (int32_t *)HDcalloc(4096, sizeof(int32_t));
+ HDassert(rbuf);
+
+ /* Case 1: No metadata in accumulator */
+ /* Write 10 1's at address 40 */
+ /* @0:| 1111111111| */
+ /* Put some data in the accumulator initially */
+ for(i = 0; i < 10; i++)
+ wbuf[i] = 1;
+ if(accum_write(40, 10 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(40, 10 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 10 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Case 2: End of new piece aligns with start of clean accumulated data */
+ /* Write 5 2's at address 20 */
+ /* @0:| 222221111111111| */
+ if(accum_flush() < 0) FAIL_STACK_ERROR;
+ for(i = 0; i < 5; i++)
+ wbuf[i] = 2;
+ if(accum_write(20, 5 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(20, 5 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 5 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Case 3: Start of new piece aligns with start of accumulated data,
+ * completely encloses dirty section of accumulator */
+ /* Write 6 3's at address 20 */
+ /* @0:| 333333111111111| */
+ for(i = 0; i < 6; i++)
+ wbuf[i] = 3;
+ if(accum_write(20, 6 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(20, 6 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 6 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Case 4: New piece completely within accumulated data, overlaps
+ * end of dirty section of accumulator */
+ /* Write 2 4's at address 40 */
+ /* @0:| 333334411111111| */
+ for(i = 0; i < 2; i++)
+ wbuf[i] = 4;
+ if(accum_write(40, 2 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(40, 2 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 2 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Case 5: New piece completely within accumulated data, completely
+ * after dirty section of accumulator */
+ /* Write 2 5's at address 52 */
+ /* @0:| 333334415511111| */
+ for(i = 0; i < 2; i++)
+ wbuf[i] = 5;
+ if(accum_write(52, 2 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(52, 2 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 2 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Case 6: New piece completely within clean accumulated data */
+ /* Write 3 6's at address 44 */
+ /* @0:| 333334666511111| */
+ if(accum_flush() < 0) FAIL_STACK_ERROR;
+ for(i = 0; i < 3; i++)
+ wbuf[i] = 6;
+ if(accum_write(44, 3 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(44, 3 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 3 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Case 7: New piece overlaps start of clean accumulated data */
+ /* Write 2 7's at address 16 */
+ /* @0:| 7733334666511111| */
+ if(accum_flush() < 0) FAIL_STACK_ERROR;
+ for(i = 0; i < 2; i++)
+ wbuf[i] = 7;
+ if(accum_write(16, 2 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(16, 2 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 2 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Case 8: New piece overlaps start of accumulated data, completely
+ * encloses dirty section of accumulator */
+ /* Write 4 8's at address 12 */
+ /* @0:| 88883334666511111| */
+ for(i = 0; i < 4; i++)
+ wbuf[i] = 8;
+ if(accum_write(12, 4 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(12, 4 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 4 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Case 9: Start of new piece aligns with end of clean accumulated data */
+ /* Write 3 9's at address 80 */
+ /* @0:| 88883334666511111999| */
+ if(accum_flush() < 0) FAIL_STACK_ERROR;
+ for(i = 0; i < 3; i++)
+ wbuf[i] = 9;
+ if(accum_write(80, 3 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(80, 3 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 3 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Case 10: New piece overlaps end of clean accumulated data */
+ /* Write 2 2's at address 88 */
+ /* @0:| 888833346665111119922| */
+ if(accum_flush() < 0) FAIL_STACK_ERROR;
+ for(i = 0; i < 2; i++)
+ wbuf[i] = 2;
+ if(accum_write(88, 2 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(88, 2 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 2 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Case 11: New piece overlaps end of accumulated data, completely encloses
+ * dirty section of accumulator */
+ /* Write 4 7's at address 84 */
+ /* @0:| 8888333466651111197777| */
+ for(i = 0; i < 4; i++)
+ wbuf[i] = 7;
+ if(accum_write(84, 4 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(84, 4 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 4 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Set up expected data buffer and verify contents of
+ accumulator as constructed by cases 1-11, above */
+ for(i = 0; i < 4; i++)
+ wbuf[i] = 8;
+ for(i = 4; i < 7; i++)
+ wbuf[i] = 3;
+ for(i = 7; i < 8; i++)
+ wbuf[i] = 4;
+ for(i = 8; i < 11; i++)
+ wbuf[i] = 6;
+ for(i = 11; i < 12; i++)
+ wbuf[i] = 5;
+ for(i = 12; i < 17; i++)
+ wbuf[i] = 1;
+ for(i = 17; i < 18; i++)
+ wbuf[i] = 9;
+ for(i = 18; i < 22; i++)
+ wbuf[i] = 7;
+ if(accum_read(12, 22 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 22 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+ PASSED();
+
+ /* Release memory */
+ HDfree(wbuf);
+ HDfree(rbuf);
+
+ return 0;
+
+error:
+ /* Release memory */
+ HDfree(wbuf);
+ HDfree(rbuf);
+
+ return 1;
+} /* test_accum_overlap_clean */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_accum_non_overlap_size
+ *
+ * Purpose: This test will write a series of pieces of data
+ * to the accumulator with the goal of not overlapping
+ * the writes with a data size larger than the accum size.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Allen Byrne
+ * October 8, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+unsigned
+test_accum_non_overlap_size(void)
+{
+ int i = 0;
+ int32_t *wbuf, *rbuf;
+
+ TESTING("non-overlapping write to accumulator larger then accum_size");
+
+ /* Allocate buffers */
+ wbuf = (int *)HDmalloc(4096 * sizeof(int32_t));
+ HDassert(wbuf);
+ rbuf = (int *)HDcalloc(4096, sizeof(int32_t));
+ HDassert(rbuf);
+
+ /* Case 1: No metadata in accumulator */
+ /* Write 10 1's at address 140 */
+ /* @0:| 1111111111| */
+ /* Put some data in the accumulator initially */
+ for(i = 0; i < 10; i++)
+ wbuf[i] = 1;
+ if(accum_write(140, 10 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(140, 10 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 10 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ /* Case 9: New piece completely before accumulated data */
+ /* Write 20 9's at address 0 */
+ /* @0:|9 1111111111| */
+ for(i = 0; i < 20; i++)
+ wbuf[i] = 9;
+ if(accum_write(0, 20 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(0, 20 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 20 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+ PASSED();
+
+ /* Release memory */
+ HDfree(wbuf);
+ HDfree(rbuf);
+
+ return 0;
+
+error:
+ /* Release memory */
+ HDfree(wbuf);
+ HDfree(rbuf);
+
+ return 1;
+} /* test_accum_non_overlap_size */
+
+/*-------------------------------------------------------------------------
+ * Function: test_accum_overlap_size
+ *
+ * Purpose: This test will write a series of pieces of data
+ * to the accumulator with the goal of overlapping
+ * the writes with a data size completely overlapping
+ * the accumulator at both ends.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Allen Byrne
+ * October 8, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+unsigned
+test_accum_overlap_size(void)
+{
+ int i = 0;
+ int32_t *wbuf, *rbuf;
+
+ TESTING("overlapping write to accumulator larger then accum_size");
+
+ /* Allocate buffers */
+ wbuf = (int32_t *)HDmalloc(4096 * sizeof(int32_t));
+ HDassert(wbuf);
+ rbuf = (int32_t *)HDcalloc(4096, sizeof(int32_t));
+ HDassert(rbuf);
+
+ /* Case 1: No metadata in accumulator */
+ /* Write 10 1's at address 64 */
+ /* @0:| 1111111111| */
+ /* Put some data in the accumulator initially */
+ for(i = 0; i < 10; i++)
+ wbuf[i] = 1;
+ if(accum_write(64, 10 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(64, 10 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 10 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+    /* Case 9: New piece completely covers the accumulated data */
+    /* Write 72 9's at address 60 */
+ /* @0:|9 1111111111| */
+ for(i = 0; i < 72; i++)
+ wbuf[i] = 9;
+ if(accum_write(60, 72 * sizeof(int32_t), wbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(60, 72 * sizeof(int32_t), rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 72 * sizeof(int32_t)) != 0) TEST_ERROR;
+
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+ PASSED();
+
+ /* Release memory */
+ HDfree(wbuf);
+ HDfree(rbuf);
+
+ return 0;
+
+error:
+ /* Release memory */
+ HDfree(wbuf);
+ HDfree(rbuf);
+
+ return 1;
+} /* test_accum_overlap_size */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_accum_adjust
+ *
+ * Purpose: This test examines the various ways the accumulator might
+ * adjust itself as a result of data appending or prepending
+ * to it.
+ *
+ * This test program covers all the code in H5F_accum_adjust,
+ * but NOT all possible paths through said code. It only covers
+ * six potential paths through the function. (Again, though, each
+ * piece of code within an if/else statement in H5F_accum_adjust is
+ *              covered by one of the paths in this test function.) Because
+ *              the function's many nested if/else statements produce an
+ *              enormous number of possible paths, covering every path by
+ *              hand would require far too many test cases. (Though if
+ *              someone comes across this code and has some free time, go
+ *              for it.)
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Mike McGreevy
+ * October 11, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+unsigned
+test_accum_adjust(void)
+{
+ int i = 0;
+ int s = 1048576; /* size of buffer */
+ int32_t *wbuf, *rbuf;
+
+ TESTING("accumulator adjustments after append/prepend of data");
+
+ /* Allocate buffers */
+ wbuf = (int32_t *)HDmalloc((size_t)s * sizeof(int32_t));
+ HDassert(wbuf);
+ rbuf = (int32_t *)HDcalloc((size_t)s, sizeof(int32_t));
+ HDassert(rbuf);
+
+ /* Fill up write buffer */
+ for(i = 0; i < s; i++)
+ wbuf[i] = i + 1;
+
+ /* ================================================================ */
+ /* CASE 1: Prepending small block to large, fully dirty accumulator */
+ /* ================================================================ */
+
+ /* Write data to the accumulator to fill it just under 1MB (max size),
+ * but not quite full. This will force the accumulator to, on subsequent
+ * writes, a) have to adjust since it's nearly full, and b) prevent
+     * an increase in size because it's already at its maximum size */
+ if(accum_write((1024 * 1024), (1024 * 1024) - 1, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Write a small (1KB) block that prepends to the front of the accumulator. */
+ /* ==> Accumulator will need more buffer space */
+ /* ==> Accumulator will try to resize, but see that it's getting too big */
+ /* ==> Size of new block is less than half maximum size of accumulator */
+ /* ==> New block is being prepended to accumulator */
+ /* ==> Accumulator is dirty, it will be flushed. */
+ /* ==> Dirty region overlaps region to eliminate from accumulator */
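+    /* (Offsets used: the accumulated data occupies bytes [1MB, 2MB-1); the new
+     *  1KB block at offset 1MB-1KB ends exactly where that data begins) */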
+ if(accum_write((1024 * 1024) - 1024, 1024, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Read back and verify first write */
+ if(accum_read((1024 * 1024), (1024 * 1024) - 1, rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, (1024 * 1024) - 1) != 0) TEST_ERROR;
+
+ /* Read back and verify second write */
+ if(accum_read((1024 * 1024) - 1024, 1024, rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 1024) != 0) TEST_ERROR;
+
+ /* Reset accumulator for next case */
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+ /* ================================================================ */
+ /* Case 2: Prepending large block to large, fully dirty accumulator */
+ /* ================================================================ */
+
+ /* Write data to the accumulator to fill it just under 1MB (max size),
+ * but not quite full. This will force the accumulator to, on subsequent
+ * writes, a) have to adjust since it's nearly full, and b) prevent
+     * an increase in size because it's already at its maximum size */
+ if(accum_write((1024 * 1024), (1024 * 1024) - 1, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Write a large (just under 1MB) block to the front of the accumulator. */
+ /* ==> Accumulator will need more buffer space */
+ /* ==> Accumulator will try to resize, but see that it's getting too big */
+ /* ==> Size of new block is larger than half maximum size of accumulator */
+ /* ==> New block is being prepended to accumulator */
+ /* ==> Accumulator is dirty, it will be flushed. */
+ /* ==> Dirty region overlaps region to eliminate from accumulator */
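+    /* (Offsets used: the new block covers bytes [5, 1MB), ending exactly where
+     *  the accumulated data at [1MB, 2MB-1) begins) */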
+ if(accum_write(5, (1024 * 1024) - 5, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Read back and verify both pieces of data */
+ if(accum_read(1048576, 1048575, rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 1048576) != 0) TEST_ERROR;
+
+ if(accum_read(5, 1048571, rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 1048571) != 0) TEST_ERROR;
+
+ /* Reset accumulator for next case */
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+ /* ========================================================= */
+ /* Case 3: Appending small block to large, clean accumulator */
+ /* ========================================================= */
+
+ /* Write data to the accumulator to fill it just under 1MB (max size),
+ * but not quite full. This will force the accumulator to, on subsequent
+ * writes, a) have to adjust since it's nearly full, and b) prevent
+     * an increase in size because it's already at its maximum size */
+ if(accum_write(0, (1024 * 1024) - 1, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Flush the accumulator -- we want to test the case when
+ accumulator contains clean data */
+ if(accum_flush() < 0) FAIL_STACK_ERROR
+
+ /* Write a small (1KB) block to the end of the accumulator */
+ /* ==> Accumulator will need more buffer space */
+ /* ==> Accumulator will try to resize, but see that it's getting too big */
+    /* ==> Size of new block is less than half maximum size of accumulator */
+ /* ==> New block being appended to accumulator */
+ /* ==> Accumulator is NOT dirty */
+ /* ==> Since we're appending, need to adjust location of accumulator */
+ if(accum_write((1024 * 1024) - 1, 1024, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Write a piece of metadata outside current accumulator to force write
+ to disk */
+ if(accum_write(0, 1, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Read in the piece we wrote to disk above, and then verify that
+ the data is as expected */
+ if(accum_read((1024 * 1024) - 1, 1024, rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 1024) != 0) TEST_ERROR;
+
+ /* Reset accumulator for next case */
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+ /* ==================================================================== */
+ /* Case 4: Appending small block to large, partially dirty accumulator, */
+ /* with existing dirty region NOT aligning with the new block */
+ /* ==================================================================== */
+
+ /* Write data to the accumulator to fill it just under 1MB (max size),
+ * but not quite full. This will force the accumulator to, on subsequent
+ * writes, a) have to adjust since it's nearly full, and b) prevent
+     * an increase in size because it's already at its maximum size */
+ if(accum_write(0, (1024 * 1024) - 5, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Flush the accumulator to clean it */
+ if(accum_flush() < 0) FAIL_STACK_ERROR
+
+ /* write to part of the accumulator so just the start of it is dirty */
+ if(accum_write(0, 5, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Write a small (~340KB) piece of data to the other end of the accumulator */
+ /* ==> Accumulator will need more buffer space */
+ /* ==> Accumulator will try to resize, but see that it's getting too big */
+    /* ==> Size of new block is less than half maximum size of accumulator */
+    /* ==> New block being appended to accumulator */
+    /* ==> We can slide the dirty region down, to accommodate the request */
+    /* ==> (Max Buffer Size - (dirty offset + adjust size)) >= 2 * size */
+ /* ==> Need to adjust location of accumulator while appending */
+ /* ==> Accumulator will need to be reallocated */
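+    /* (1048571 = 1MB - 5, so this write starts exactly at the end of the
+     *  existing accumulated data [0, 1MB-5); 349523 bytes is roughly 341KB) */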
+ if(accum_write(1048571, 349523, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Write a piece of metadata outside current accumulator to force write
+ to disk */
+ if(accum_write(1398900, 1, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Read in the piece we wrote to disk above, and then verify that
+ the data is as expected */
+ if(accum_read(1048571, 349523, rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 349523) != 0) TEST_ERROR;
+
+ /* Reset accumulator for next case */
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+ /* ==================================================================== */
+ /* Case 5: Appending small block to large, partially dirty accumulator, */
+ /* with existing dirty region aligning with new block */
+ /* ==================================================================== */
+
+ /* Write data to the accumulator to fill it just under max size (but not full) */
+ if(accum_write(0, (1024 * 1024) - 5, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Flush the accumulator to clean it */
+ if(accum_flush() < 0) FAIL_STACK_ERROR
+
+ /* write to part of the accumulator so it's dirty, but not entirely dirty */
+    /* (just the first few bytes will be clean) */
+ if(accum_write(10, (1024 * 1024) - 15, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Write a small piece of data to the dirty end of the accumulator */
+ /* ==> Accumulator will need more buffer space */
+ /* ==> Accumulator will try to resize, but see that it's getting too big */
+    /* ==> Size of new block is less than half maximum size of accumulator */
+    /* ==> New block being appended to accumulator */
+    /* ==> We can slide the dirty region down, to accommodate the request */
+    /* ==> (Max Buffer Size - (dirty offset + adjust size)) < 2 * size */
+ /* ==> Need to adjust location of accumulator while appending */
+ if(accum_write((1024 * 1024) - 5, 10, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Write a piece of metadata outside current accumulator to force write
+ to disk */
+ if(accum_write(0, 1, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Read in the piece we wrote to disk above, and then verify that
+ the data is as expected */
+ if(accum_read((1024 * 1024) - 5, 10, rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 10) != 0) TEST_ERROR;
+
+ /* Reset accumulator for next case */
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+ /* ================================================================= */
+ /* Case 6: Appending small block to large, fully dirty accumulator */
+ /* ================================================================= */
+
+ /* Write data to the accumulator to fill it just under 1MB (max size),
+ * but not quite full. This will force the accumulator to, on subsequent
+ * writes, a) have to adjust since it's nearly full, and b) prevent
+     * an increase in size because it's already at its maximum size */
+ if(accum_write(0, (1024 * 1024) - 5, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Write a small (~340KB) piece of data to the end of the accumulator */
+ /* ==> Accumulator will need more buffer space */
+ /* ==> Accumulator will try to resize, but see that it's getting too big */
+    /* ==> Size of new block is less than half maximum size of accumulator */
+ /* ==> New block being appended to accumulator */
+ /* ==> We cannot slide dirty region down, it's all dirty */
+ /* ==> Dirty region overlaps region to eliminate from accumulator */
+ /* ==> Need to adjust location of accumulator while appending */
+ if(accum_write(1048571, 349523, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Write a piece of metadata outside current accumulator to force write
+ to disk */
+ if(accum_write(1398900, 1, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Read in the piece we wrote to disk above, and then verify that
+ the data is as expected */
+ if(accum_read(1048571, 349523, rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 349523) != 0) TEST_ERROR;
+
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+ PASSED();
+
+ /* Release memory */
+ HDfree(wbuf);
+ HDfree(rbuf);
+
+ return 0;
+
+error:
+ /* Release memory */
+ HDfree(wbuf);
+ HDfree(rbuf);
+
+ return 1;
+} /* test_accum_adjust */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_read_after
+ *
+ * Purpose: This test will verify the case when metadata is read partly
+ * from the accumulator and partly from disk. The test will
+ * write a block of data at address 512, force the data to be
+ * written to disk, write new data partially overlapping the
+ * original block from below, then read data at address 512.
+ * The data read should be partly new and partly original.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Larry Knox
+ * October 8, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+unsigned
+test_read_after(void)
+{
+ int i = 0;
+ int s = 128; /* size of buffer */
+ int32_t *wbuf, *rbuf;
+
+ TESTING("reading data from both accumulator and disk");
+
+ /* Allocate buffers */
+ wbuf = (int32_t *)HDmalloc((size_t)s * sizeof(int32_t));
+ HDassert(wbuf);
+ rbuf = (int32_t *)HDcalloc((size_t)s, sizeof(int32_t));
+ HDassert(rbuf);
+
+ /* Fill up write buffer with 1s */
+ for(i = 0; i < s; i++)
+ wbuf[i] = 1;
+
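+    /* Note: accum_write()/accum_read() take byte addresses and byte counts,
+     * so with s = 128 int32 elements the buffers below span 512 bytes. */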
+ /* Write data to the accumulator to fill it. */
+ if(accum_write(512, 512, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Write a piece of metadata outside current accumulator to force write
+ to disk */
+ if(accum_write(0, 1, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Fill up write buffer with 2s */
+ for(i = 0; i < s; i++)
+ wbuf[i] = 2;
+
+ /* Write a block of 2s of the original size that will overlap the lower half
+ of the original block */
+ if(accum_write(256, 512, wbuf) < 0) FAIL_STACK_ERROR;
+
+    /* Read the full 512-byte block back at the original address */
+ if(accum_read(512, 512, rbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Set the second half of wbuf back to 1s */
+ for(i = 64; i < s; i++)
+ wbuf[i] = 1;
+
+ /* Read in the piece we wrote to disk above, and then verify that
+ the data is as expected */
+ if(accum_read(512, 512, rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf, rbuf, 128) != 0) TEST_ERROR;
+
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+ PASSED();
+
+ /* Release memory */
+ HDfree(wbuf);
+ HDfree(rbuf);
+
+ return 0;
+
+error:
+ /* Release memory */
+ HDfree(wbuf);
+ HDfree(rbuf);
+
+ return 1;
+} /* end test_read_after */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_big
+ *
+ * Purpose: This test exercises writing large pieces of metadata to the
+ * file.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Quincey Koziol
+ * October 12, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+unsigned
+test_big(void)
+{
+ uint8_t *wbuf, *wbuf2, *rbuf, *zbuf; /* Buffers for reading & writing, etc */
+ unsigned u; /* Local index variable */
+
+ /* Allocate space for the write & read buffers */
+ wbuf = (uint8_t *)HDmalloc(BIG_BUF_SIZE);
+ HDassert(wbuf);
+ wbuf2 = (uint8_t *)HDmalloc(BIG_BUF_SIZE);
+ HDassert(wbuf2);
+ rbuf = (uint8_t *)HDcalloc(BIG_BUF_SIZE + 1536, 1);
+ HDassert(rbuf);
+ zbuf = (uint8_t *)HDcalloc(BIG_BUF_SIZE + 1536, 1);
+ HDassert(zbuf);
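+    /* (rbuf and zbuf are sized BIG_BUF_SIZE + 1536 because later cases read
+     *  and zero out up to 1536 bytes past the "big" region) */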
+
+ /* Initialize write buffers */
+ for(u = 0; u < BIG_BUF_SIZE; u++) {
+ wbuf[u] = (uint8_t)u;
+ wbuf2[u] = (uint8_t)(u + 1);
+ } /* end for */
+
+ TESTING("large metadata I/O operations");
+
+ /* Write large data segment to file */
+ if(accum_write(0, BIG_BUF_SIZE, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Read entire segment back from file */
+ if(accum_read(0, BIG_BUF_SIZE, rbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Verify data read */
+ if(HDmemcmp(wbuf, rbuf, BIG_BUF_SIZE) != 0) TEST_ERROR;
+
+
+ /* Reset data in file back to zeros & reset the read buffer */
+ if(accum_write(0, BIG_BUF_SIZE, zbuf) < 0) FAIL_STACK_ERROR;
+ HDmemset(rbuf, 0, BIG_BUF_SIZE);
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+
+ /* Write small section to middle of accumulator */
+ if(accum_write(1024, 1024, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Read entire segment back from file */
+ /* (Read covers entire dirty region) */
+ if(accum_read(0, BIG_BUF_SIZE, rbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Verify data read */
+ if(HDmemcmp(zbuf, rbuf, 1024) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf, rbuf + 1024, 1024) != 0) TEST_ERROR;
+ if(HDmemcmp(zbuf, rbuf + 2048, (BIG_BUF_SIZE - 2048)) != 0) TEST_ERROR;
+
+
+ /* Reset data in file back to zeros & reset the read buffer */
+ if(accum_write(1024, 1024, zbuf) < 0) FAIL_STACK_ERROR;
+ HDmemset(rbuf, 0, BIG_BUF_SIZE);
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+
+ /* Write small section to overlap with end of "big" region */
+ if(accum_write(BIG_BUF_SIZE - 512, 1024, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Read entire segment back from file */
+ /* (Read covers bottom half of dirty region) */
+ if(accum_read(0, BIG_BUF_SIZE, rbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Verify data read */
+ if(HDmemcmp(zbuf, rbuf, (BIG_BUF_SIZE - 512)) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf, rbuf + (BIG_BUF_SIZE - 512), 512) != 0) TEST_ERROR;
+
+
+ /* Reset data in file back to zeros & reset the read buffer */
+ if(accum_write(BIG_BUF_SIZE - 512, 1024, zbuf) < 0) FAIL_STACK_ERROR;
+ HDmemset(rbuf, 0, BIG_BUF_SIZE);
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+
+ /* Write small section to overlap with beginning of "big" region */
+ if(accum_write(0, 1024, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Read entire segment back from file */
+ /* (Read covers bottom half of dirty region) */
+ if(accum_read(512, BIG_BUF_SIZE, rbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Verify data read */
+ if(HDmemcmp(wbuf + 512, rbuf, 512) != 0) TEST_ERROR;
+ if(HDmemcmp(zbuf, rbuf + 512, (BIG_BUF_SIZE - 512)) != 0) TEST_ERROR;
+
+
+ /* Reset data in file back to zeros & reset the read buffer */
+ if(accum_write(0, 1024, zbuf) < 0) FAIL_STACK_ERROR;
+ HDmemset(rbuf, 0, BIG_BUF_SIZE);
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+
+ /* Write small section to middle of accumulator */
+ /* (With write buffer #1) */
+ if(accum_write(1024, 1024, wbuf) < 0) FAIL_STACK_ERROR;
+
+    /* Write entire segment to file */
+ /* (With write buffer #2) */
+ /* (Write covers entire dirty region) */
+ if(accum_write(0, BIG_BUF_SIZE, wbuf2) < 0) FAIL_STACK_ERROR;
+
+ /* Read entire segment back from file */
+ if(accum_read(0, BIG_BUF_SIZE, rbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Verify data read */
+ if(HDmemcmp(wbuf2, rbuf, BIG_BUF_SIZE) != 0) TEST_ERROR;
+
+
+ /* Reset data in file back to zeros & reset the read buffer */
+ if(accum_write(0, BIG_BUF_SIZE, zbuf) < 0) FAIL_STACK_ERROR;
+ HDmemset(rbuf, 0, BIG_BUF_SIZE);
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+
+ /* Write small section to overlap with end of "big" region */
+ /* (With write buffer #1) */
+ if(accum_write(BIG_BUF_SIZE - 512, 1024, wbuf) < 0) FAIL_STACK_ERROR;
+
+    /* Write entire segment to file */
+    /* (With write buffer #2) */
+    /* (Write covers bottom half of dirty region) */
+ if(accum_write(0, BIG_BUF_SIZE, wbuf2) < 0) FAIL_STACK_ERROR;
+
+ /* Read both segments back from file */
+ if(accum_read(0, BIG_BUF_SIZE + 512, rbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Verify data read */
+ if(HDmemcmp(wbuf2, rbuf, BIG_BUF_SIZE) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf + 512, rbuf + BIG_BUF_SIZE, 512) != 0) TEST_ERROR;
+
+
+ /* Reset data in file back to zeros & reset the read buffer */
+ if(accum_write(0, BIG_BUF_SIZE + 512, zbuf) < 0) FAIL_STACK_ERROR;
+ HDmemset(rbuf, 0, BIG_BUF_SIZE + 512);
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+
+ /* Write small section to be past "big" region */
+ /* (With write buffer #1) */
+ if(accum_write(BIG_BUF_SIZE + 512, 1024, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Read section before "big" region */
+    /* (To enlarge accumulator, so it will intersect with big write) */
+ if(accum_read(BIG_BUF_SIZE - 512, 1024, rbuf) < 0) FAIL_STACK_ERROR;
+
+    /* Write entire segment to file */
+ /* (With write buffer #2) */
+ /* (Doesn't overlap with small section) */
+ if(accum_write(0, BIG_BUF_SIZE, wbuf2) < 0) FAIL_STACK_ERROR;
+
+ /* Read both segments & gap back from file */
+ if(accum_read(0, BIG_BUF_SIZE + 1024, rbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Verify data read */
+ if(HDmemcmp(wbuf2, rbuf, BIG_BUF_SIZE) != 0) TEST_ERROR;
+ if(HDmemcmp(zbuf, rbuf + BIG_BUF_SIZE, 512) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf, rbuf + BIG_BUF_SIZE + 512, 512) != 0) TEST_ERROR;
+
+
+ /* Reset data in file back to zeros & reset the read buffer */
+ if(accum_write(0, BIG_BUF_SIZE + 1536, zbuf) < 0) FAIL_STACK_ERROR;
+ HDmemset(rbuf, 0, BIG_BUF_SIZE + 1024);
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+
+ /* Write small section to be past "big" region */
+ /* (With write buffer #1) */
+ if(accum_write(BIG_BUF_SIZE + 512, 1024, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Read section before "big" region */
+ /* (To enlarge accumulator, so it will intersect with big write) */
+ if(accum_read(BIG_BUF_SIZE - 512, 1024, rbuf) < 0) FAIL_STACK_ERROR;
+ if(accum_read(BIG_BUF_SIZE + 1536, 1024, rbuf) < 0) FAIL_STACK_ERROR;
+
+    /* Write entire segment to file */
+ /* (With write buffer #2) */
+ /* (Overwriting dirty region, but not invalidating entire accumulator) */
+ if(accum_write(1536, BIG_BUF_SIZE, wbuf2) < 0) FAIL_STACK_ERROR;
+
+ /* Read both segments & gap back from file */
+ if(accum_read(0, BIG_BUF_SIZE + 1536, rbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Verify data read */
+ if(HDmemcmp(zbuf, rbuf, 1536) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf2, rbuf + 1536, BIG_BUF_SIZE) != 0) TEST_ERROR;
+
+
+ /* Reset data in file back to zeros & reset the read buffer */
+ if(accum_write(1536, BIG_BUF_SIZE, zbuf) < 0) FAIL_STACK_ERROR;
+ HDmemset(rbuf, 0, BIG_BUF_SIZE + 1536);
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+
+ /* Write small section before "big" region */
+ /* (With write buffer #1) */
+ if(accum_write(1024, 1024, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Read section before "big" region */
+ /* (To enlarge accumulator, so it will intersect with big write) */
+ if(accum_read(0, 1024, rbuf) < 0) FAIL_STACK_ERROR;
+
+    /* Write entire segment to file */
+ /* (With write buffer #2) */
+ /* (Overwriting dirty region, but not invalidating entire accumulator) */
+ if(accum_write(512, BIG_BUF_SIZE, wbuf2) < 0) FAIL_STACK_ERROR;
+
+ /* Read both segments & gap back from file */
+ if(accum_read(0, BIG_BUF_SIZE + 512, rbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Verify data read */
+ if(HDmemcmp(zbuf, rbuf, 512) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf2, rbuf + 512, BIG_BUF_SIZE) != 0) TEST_ERROR;
+
+
+ /* Reset data in file back to zeros & reset the read buffer */
+ if(accum_write(512, BIG_BUF_SIZE, zbuf) < 0) FAIL_STACK_ERROR;
+ HDmemset(rbuf, 0, BIG_BUF_SIZE + 512);
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+
+ /* Write small section before "big" region */
+ /* (With write buffer #1) */
+ if(accum_write(0, 1024, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Read section before "big" region */
+ /* (To enlarge accumulator, so it will intersect with big write) */
+ if(accum_read(1024, 1024, rbuf) < 0) FAIL_STACK_ERROR;
+
+    /* Write entire segment to file */
+ /* (With write buffer #2) */
+ /* (Avoiding dirty region, and not invalidating entire accumulator) */
+ if(accum_write(1536, BIG_BUF_SIZE, wbuf2) < 0) FAIL_STACK_ERROR;
+
+ /* Read both segments & gap back from file */
+ if(accum_read(0, BIG_BUF_SIZE + 1536, rbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Verify data read */
+ if(HDmemcmp(wbuf, rbuf, 1024) != 0) TEST_ERROR;
+ if(HDmemcmp(zbuf, rbuf + 1024, 512) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf2, rbuf + 1536, BIG_BUF_SIZE) != 0) TEST_ERROR;
+
+
+ /* Reset data in file back to zeros & reset the read buffer */
+ if(accum_write(0, BIG_BUF_SIZE + 1536, zbuf) < 0) FAIL_STACK_ERROR;
+ HDmemset(rbuf, 0, BIG_BUF_SIZE + 1536);
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+
+ /* Write small section before "big" region */
+ /* (With write buffer #1) */
+ if(accum_write(0, 1024, wbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Read section before "big" region */
+ /* (To enlarge accumulator, so it will intersect with big write) */
+ if(accum_read(1024, 1024, rbuf) < 0) FAIL_STACK_ERROR;
+
+    /* Write entire segment to file */
+ /* (With write buffer #2) */
+ /* (Partially overwriting dirty region, and not invalidating entire accumulator) */
+ if(accum_write(512, BIG_BUF_SIZE, wbuf2) < 0) FAIL_STACK_ERROR;
+
+ /* Read both segments back from file */
+ if(accum_read(0, BIG_BUF_SIZE + 512, rbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Verify data read */
+ if(HDmemcmp(wbuf, rbuf, 512) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf2, rbuf + 512, BIG_BUF_SIZE) != 0) TEST_ERROR;
+
+
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+ PASSED();
+
+ /* Release memory */
+ HDfree(wbuf);
+ HDfree(wbuf2);
+ HDfree(rbuf);
+ HDfree(zbuf);
+
+ return 0;
+
+error:
+ HDfree(wbuf);
+ HDfree(wbuf2);
+ HDfree(rbuf);
+ HDfree(zbuf);
+
+ return 1;
+} /* end test_big() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_random_write
+ *
+ * Purpose: This test writes random pieces of data to the file and
+ * then reads it all back.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Quincey Koziol
+ * October 11, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+unsigned
+test_random_write(void)
+{
+ uint8_t *wbuf, *rbuf; /* Buffers for reading & writing */
+ unsigned long seed = 0; /* Random # seed */
+ size_t *off; /* Offset of buffer segments to write */
+ size_t *len; /* Size of buffer segments to write */
+ size_t cur_off; /* Current offset */
+ size_t nsegments; /* Number of segments to write */
+ size_t swap; /* Position to swap with */
+ unsigned u; /* Local index variable */
+
+ /* Allocate space for the write & read buffers */
+    wbuf = (uint8_t *)HDmalloc(RANDOM_BUF_SIZE);
+    HDassert(wbuf);
+    rbuf = (uint8_t *)HDcalloc(RANDOM_BUF_SIZE, 1);
+ HDassert(rbuf);
+
+ /* Initialize write buffer */
+ for(u = 0; u < RANDOM_BUF_SIZE; u++)
+ wbuf[u] = (uint8_t)u;
+
+ TESTING("random writes to accumulator");
+
+ /* Choose random # seed */
+ seed = (unsigned long)HDtime(NULL);
+#ifdef QAK
+/* seed = (unsigned long)1155438845; */
+HDfprintf(stderr, "Random # seed was: %lu\n", seed);
+#endif /* QAK */
+ HDsrandom(seed);
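+    /* (On failure, the error path below reports the seed so a failing
+     *  sequence can be reproduced by hard-coding it here) */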
+
+ /* Allocate space for the segment length buffer */
+    off = (size_t *)HDmalloc(MAX_RANDOM_SEGMENTS * sizeof(size_t));
+    HDassert(off);
+    len = (size_t *)HDmalloc(MAX_RANDOM_SEGMENTS * sizeof(size_t));
+ HDassert(len);
+
+ /* Randomly choose lengths of segments */
+ cur_off = 0;
+ for(u = 0; u < MAX_RANDOM_SEGMENTS; ) {
+ size_t length = 0; /* Length of current segment */
+
+ /* Choose random length of segment, allowing for variance */
+ do {
+ length += (size_t)(HDrandom() % RAND_SEG_LEN) + 1;
+ } while((HDrandom() & 256) >= 128); /* end while */
+
+ /* Check for going off end of buffer */
+ if((cur_off + length) > RANDOM_BUF_SIZE)
+ length = RANDOM_BUF_SIZE - cur_off;
+
+ /* Set offset & length of segment */
+ off[u] = cur_off;
+ len[u] = length;
+
+ /* Advance array offset */
+ u++;
+
+ /* Advance current offset */
+ cur_off += length;
+
+ /* If we've used up entire buffer before hitting limit of segments, get out */
+ if(cur_off >= RANDOM_BUF_SIZE)
+ break;
+ } /* end for */
+ nsegments = u;
+
+ /* Increase length of last segment, if it doesn't reach end of buffer */
+ if(nsegments < MAX_RANDOM_SEGMENTS)
+ len[nsegments - 1] = RANDOM_BUF_SIZE - off[nsegments - 1];
+
+ /* Shuffle order of segments, to randomize positions to write */
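+    /* (This is a bounded local shuffle: each segment is swapped with one of
+     *  the next 8 positions, randomizing the write order while keeping it
+     *  roughly local) */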
+ for(u = 0; u < nsegments; u++) {
+ size_t tmp; /* Temporary holder for offset & length values */
+
+        /* Choose value within next few elements to swap with */
+ swap = ((size_t)HDrandom() % 8) + u;
+ if(swap >= nsegments)
+ swap = nsegments - 1;
+
+ /* Swap values */
+ tmp = off[u]; off[u] = off[swap]; off[swap] = tmp;
+ tmp = len[u]; len[u] = len[swap]; len[swap] = tmp;
+ } /* end for */
+
+ /* Write data segments to file */
+ for(u = 0; u < nsegments; u++) {
+ if(accum_write(RANDOM_BASE_OFF + off[u], len[u], wbuf + off[u]) < 0) FAIL_STACK_ERROR;
+
+ /* Verify individual reads */
+ if(accum_read(RANDOM_BASE_OFF + off[u], len[u], rbuf) < 0) FAIL_STACK_ERROR;
+ if(HDmemcmp(wbuf + off[u], rbuf, len[u]) != 0) TEST_ERROR;
+ } /* end for */
+
+ /* Read entire region back from file */
+ if(accum_read(RANDOM_BASE_OFF, RANDOM_BUF_SIZE, rbuf) < 0) FAIL_STACK_ERROR;
+
+ /* Verify data read back in */
+ if(HDmemcmp(wbuf, rbuf, RANDOM_BUF_SIZE) != 0) TEST_ERROR;
+
+ if(accum_reset() < 0) FAIL_STACK_ERROR;
+
+ PASSED();
+
+ /* Release memory */
+ HDfree(wbuf);
+ HDfree(rbuf);
+ HDfree(off);
+ HDfree(len);
+
+ return 0;
+
+error:
+ /* Release memory */
+ HDfree(wbuf);
+ HDfree(rbuf);
+ HDfree(off);
+ HDfree(len);
+
+ HDfprintf(stderr, "Random # seed was: %lu\n", seed);
+ return 1;
+} /* end test_random_write() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: accum_printf
+ *
+ * Purpose: Debug function to print some stats about the accumulator
+ *
+ * Return:      void
+ *
+ * Programmer: Mike McGreevy
+ * October 7, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+accum_printf(void)
+{
+ H5F_meta_accum_t * accum = &f->shared->accum;
+
+ printf("\n");
+ printf("Current contents of accumulator:\n");
+ if (accum->alloc_size == 0) {
+ printf("=====================================================\n");
+ printf(" No accumulator allocated.\n");
+ printf("=====================================================\n");
+ } else {
+ printf("=====================================================\n");
+ printf(" accumulator allocated size == %lu\n", (unsigned long)accum->alloc_size);
+ printf(" accumulated data size == %lu\n", (unsigned long)accum->size);
+ printf(" accumulator dirty? == %d\n", accum->dirty);
+ printf("=====================================================\n");
+ printf(" start of accumulated data, loc = %llu\n", accum->loc);
+ if (accum->dirty) printf(" start of dirty region, loc = %llu\n", accum->loc + accum->dirty_off);
+ if (accum->dirty) printf(" end of dirty region, loc = %llu\n", accum->loc + accum->dirty_off + accum->dirty_len);
+ printf(" end of accumulated data, loc = %llu\n", accum->loc + accum->size);
+ printf(" end of accumulator allocation, loc = %llu\n", accum->loc + accum->alloc_size);
+ printf("=====================================================\n");
+ }
+ printf("\n\n");
+} /* accum_printf() */
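+
+/* (Debug aid: drop a temporary call to accum_printf() next to a failing
+ * accum_write()/accum_read() call to dump the accumulator state) */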
+
diff --git a/test/be_data.h5 b/test/be_data.h5
index f906545..0feefa3 100644
--- a/test/be_data.h5
+++ b/test/be_data.h5
Binary files differ
diff --git a/test/big.c b/test/big.c
index 8d65559..24b2d25 100644
--- a/test/big.c
+++ b/test/big.c
@@ -16,16 +16,45 @@
/*
* Programmer: Robb Matzke <matzke@llnl.gov>
* Wednesday, April 8, 1998
+ * Modified: Albert Cheng <acheng@hdfgroup.org>
+ * September 11, 2010
+ */
+/*
+ * The purpose of this test is to verify whether a virtual file driver can handle:
+ * a. Large file (2GB)
+ *    This exceeds the limit of a 32-bit I/O system, since the offset is a
+ *    signed integral type (in order to support negative offsets with
+ *    respect to the end of file).
+ * b. Extra Large file (4GB)
+ *    This definitely exceeds 32-bit I/O and file systems.
+ * c. Huge file (tens of GB)
+ *    This verifies that the HDF5 library handles big logical file sizes
+ *    correctly.
+ * In practice, if a VFD can handle a big file size, there is no need to
+ * test the smaller file sizes. E.g., if it can handle the Huge file,
+ * there is no need to test the Extra Large or Large files. Therefore the
+ * test starts with the larger file sizes and continues to test the smaller
+ * sizes only if the larger-size file tests have failed.
+ *
+ * Another consideration is that even if a VFD is capable of handling a
+ * huge file, it is likely to take a long time to write every byte of it.
+ * E.g., a simple workstation may have disks with a write speed of
+ * 10MB/sec, so a huge file of 30GB will take about an hour to write.
+ * Therefore, this test will run the huge file test only if the underlying
+ * file system supports sparse files. (A sparse file here means that disk
+ * space is allocated only when the contents are actually written. E.g.,
+ * if one creates a new file, seeks forward 10 million bytes, writes
+ * 1 byte and closes the file, then the sparse file will show a file size
+ * of 10 million bytes but actually uses only a couple of disk blocks,
+ * much smaller than the nominal file size.)
+ *
+ * One more consideration is that we want to distinguish an HDF5 library
+ * failure from system limits such as the current free disk space or the
+ * user's disk space quota. Therefore, the test will first attempt to
+ * verify that no such limits exist before running the actual VFD tests.
*/
#include "h5test.h"
-const char *FILENAME[] = {
- "big",
- "sec2",
- "stdio",
- NULL
-};
-
#define DNAME "big.data"
#define WRT_N 50
@@ -42,8 +71,29 @@ const char *FILENAME[] = {
# define GB8LL 0 /*cannot do the test*/
#endif
+/* Define Small, Large, Extra Large, Huge File which
+ * correspond to less than 2GB, 2GB, 4GB, and tens-of-GB file sizes.
+ * NOFILE stands for "no file" to be tested.
+ */
+typedef enum fsizes_t { SFILE, LFILE, XLFILE, HUGEFILE, NOFILE} fsizes_t;
+/* List of VFDs to test */
+typedef enum vfd_t { SEC2_VFD, STDIO_VFD, FAMILY_VFD } vfd_t;
+fsizes_t file_size= NOFILE;
+
+const char *FILENAME[] = {
+ "big",
+ "sec2",
+ "stdio",
+ NULL
+};
+int cflag=1; /* check file system before test */
+int sparse_support=0; /* sparse file supported, default false */
+int have_space=0; /* enough space for huge file test, default false */
+hsize_t family_size_def=FAMILY_SIZE; /* default family file size */
+
/* Protocols */
static void usage(void);
+int testvfd(vfd_t vfd);
/* Array used to record all addresses at which data has been written */
/* so far. Used to prevent overlapping writes. */
@@ -129,7 +179,7 @@ is_sparse(void)
if (5!=HDwrite(fd, "hello", (size_t)5)) return 0;
if (HDclose(fd) < 0) return 0;
if (HDstat("x.h5", &sb) < 0) return 0;
- if (HDunlink("x.h5") < 0) return 0;
+ if (HDremove("x.h5") < 0) return 0;
#ifdef H5_HAVE_STAT_ST_BLOCKS
return ((unsigned long)sb.st_blocks*512 < (unsigned long)sb.st_size);
#else
@@ -156,25 +206,67 @@ is_sparse(void)
*
*-------------------------------------------------------------------------
*/
-static int
-supports_big(void)
+static fsizes_t
+supports_big(vfd_t vfd)
{
- int fd;
+ int fd = -1;
+ fsizes_t fsize = NOFILE;
+
+ switch (vfd){
+ case FAMILY_VFD:
+ case SEC2_VFD:
+ case STDIO_VFD:
+ if ((fd=HDopen("y.h5", O_RDWR|O_TRUNC|O_CREAT, 0666)) < 0)
+ goto error;
- if ((fd=HDopen("y.h5", O_RDWR|O_TRUNC|O_CREAT, 0666)) < 0) return 0;
+        /* Write a few bytes at the beginning */
+ if (5!=HDwrite(fd, "hello", (size_t)5))
+ goto quit;
+ fsize = SFILE;
- /* Write a few bytes at 2GB */
- if (HDlseek(fd, 2*GB, SEEK_SET)!=2*GB) return 0;
- if (5!=HDwrite(fd, "hello", (size_t)5)) return 0;
+ /* Write a few bytes at 2GB */
+ if (HDlseek(fd, 2*GB, SEEK_SET)!=2*GB)
+ goto quit;
+ if (5!=HDwrite(fd, "hello", (size_t)5))
+ goto quit;
+ fsize = LFILE;
- /* Write a few bytes at 4GB */
- if (HDlseek(fd, 4*GB, SEEK_SET) != 4*GB) return 0;
- if (5!=HDwrite(fd, "hello", (size_t)5)) return 0;
+ /* Write a few bytes at 4GB */
+ if (HDlseek(fd, 4*GB, SEEK_SET) != 4*GB)
+ goto quit;
+ if (5!=HDwrite(fd, "hello", (size_t)5))
+ goto quit;
+ fsize = XLFILE;
- if (HDclose(fd) < 0) return 0;
- if (HDremove("y.h5") < 0) return 0;
+        /* If the file system supports sparse files, write a few bytes at 32GB */
+ if (!sparse_support)
+ goto quit;
+ if (HDlseek(fd, 32*GB, SEEK_SET) != 32*GB)
+ goto quit;
+ if (5!=HDwrite(fd, "hello", (size_t)5))
+ goto quit;
+ fsize = HUGEFILE;
- return (1);
+ break;
+ default:
+ /* unknown or unsupported VFD */
+ goto error;
+ break;
+ }
+
+quit:
+ if (HDclose(fd) < 0)
+ goto error;
+ if (HDremove("y.h5") < 0)
+ goto error;
+ return(fsize);
+
+error:
+ if (fd >= 0){
+ HDclose(fd);
+ HDremove("y.h5");
+ }
+ return (fsize);
}
@@ -231,7 +323,7 @@ enough_room(hid_t fapl)
HDsnprintf(name, sizeof name, filename, i);
if(HDclose(fd[i]) < 0)
ret_value=0;
- HDunlink(name);
+ HDremove(name);
}
return ret_value;
@@ -257,7 +349,7 @@ enough_room(hid_t fapl)
*-------------------------------------------------------------------------
*/
static int
-writer (char* filename, hid_t fapl, int wrt_n)
+writer (char* filename, hid_t fapl, fsizes_t testsize, int wrt_n)
{
hsize_t size1[4] = {8, 1024, 1024, 1024};
hsize_t size2[1] = {GB8LL};
@@ -269,7 +361,39 @@ writer (char* filename, hid_t fapl, int wrt_n)
FILE *out = fopen(DNAME, "w");
hid_t dcpl;
- TESTING("large dataset write");
+ switch(testsize){
+ case LFILE:
+ TESTING("Large dataset write(2GB)");
+ /* reduce size1 to produce a 2GB dataset */
+ size1[1] = 1024/16;
+ size2[0] /= 16;
+ break;
+
+ case XLFILE:
+ TESTING("Extra large dataset write(4GB)");
+ /* reduce size1 to produce a 4GB dataset */
+ size1[1] = 1024/8;
+ size2[0] /= 8;
+ break;
+
+ case HUGEFILE:
+ TESTING("Huge dataset write");
+ /* Leave size1 as 32GB */
+ break;
+
+ case SFILE:
+ TESTING("small dataset write(1GB)");
+ /* reduce size1 to produce a 1GB dataset */
+ size1[1] = 1024/32;
+ size2[0] /= 32;
+ break;
+
+ case NOFILE:
+ /* what to do?? */
+ HDfprintf(stdout, "Unexpected file size of NOFILE\n");
+ goto error;
+ break;
+ }
/*
* We might be on a machine that has 32-bit files, so create an HDF5 file
@@ -475,6 +599,124 @@ usage(void)
+/* Flush stdout at the end of this test routine to ensure later output to */
+/* stderr will not come out before it.*/
+int testvfd(vfd_t vfd)
+{
+ hid_t fapl=-1;
+ hsize_t family_size;
+ char filename[1024];
+ fsizes_t testsize;
+
+
+ switch(vfd){
+ case FAMILY_VFD:
+ /* Test huge file with the family driver */
+ puts("Testing big file with the Family Driver ");
+ if ((fapl=H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ goto error;
+
+ if (H5Pset_fapl_family(fapl, family_size_def, H5P_DEFAULT) < 0)
+ goto error;
+
+ if (cflag){
+ /*
+ * We shouldn't run this test if the file system doesn't support holes
+ * because we would generate multi-gigabyte files.
+ */
+ puts("Checking if file system is adequate for this test...");
+ if (sizeof(long long)<8 || 0==GB8LL) {
+ puts("Test skipped because sizeof(long long) is too small. This");
+ puts("hardware apparently doesn't support 64-bit integer types.");
+ usage();
+ goto quit;
+ }
+ if (!sparse_support) {
+ puts("Test skipped because file system does not support holes.");
+ usage();
+ goto quit;
+ }
+ if (!enough_room(fapl)) {
+ puts("Test skipped because of quota (file size or num open files).");
+ usage();
+ goto quit;
+ }
+ }
+
+ /* Do the test with the Family Driver */
+ h5_fixname(FILENAME[0], fapl, filename, sizeof filename);
+
+ if (writer(filename, fapl, HUGEFILE, WRT_N)) goto error;
+ if (reader(filename, fapl)) goto error;
+
+ puts("Test passed with the Family Driver.");
+ break;
+
+ case SEC2_VFD:
+ testsize = supports_big(SEC2_VFD);
+ if (testsize == NOFILE) {
+ HDfprintf(stdout, "Test for sec2 is skipped because file system does not support big files.\n");
+ goto quit;
+ }
+ /* Test big file with the SEC2 driver */
+ puts("Testing big file with the SEC2 Driver ");
+
+ if ((fapl=H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ goto error;
+ if(H5Pset_fapl_sec2(fapl) < 0)
+ goto error;
+
+ h5_fixname(FILENAME[1], fapl, filename, sizeof filename);
+
+ if (writer(filename, fapl, testsize, WRT_N)) goto error;
+ if (reader(filename, fapl)) goto error;
+
+ puts("Test passed with the SEC2 Driver.");
+ break;
+
+ case STDIO_VFD:
+ testsize = supports_big(STDIO_VFD);
+ if (testsize == NOFILE) {
+ HDfprintf(stdout, "Test for stdio is skipped because file system does not support big files.\n");
+ goto quit;
+ }
+ puts("\nTesting big file with the STDIO Driver ");
+
+ if ((fapl=H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ goto error;
+ if(H5Pset_fapl_stdio(fapl) < 0)
+ goto error;
+
+ h5_fixname(FILENAME[2], fapl, filename, sizeof filename);
+
+ if (writer(filename, fapl, testsize, WRT_N)) goto error;
+ if (reader(filename, fapl)) goto error;
+ puts("Test passed with the STDIO Driver.");
+ break;
+
+ default:
+ puts("Unsupprted VFD");
+ usage();
+ goto error;;
+ } /* end of switch (vfd) */
+
+quit:
+ /* End with normal return code */
+ /* Clean up the test file */
+ if (h5_cleanup(FILENAME, fapl)) HDremove(DNAME);
+ fflush(stdout);
+ return 0;
+
+
+error:
+ if (fapl>=0) H5Pclose(fapl);
+ puts("*** TEST FAILED ***");
+ fflush(stdout);
+ return 1;
+}
+
+
+
/*-------------------------------------------------------------------------
* Function: main
*
@@ -501,15 +743,9 @@ usage(void)
int
main (int ac, char **av)
{
- hid_t fapl=-1;
- hsize_t family_size;
- hsize_t family_size_def; /* default family file size */
unsigned long seed = 0; /* Random # seed */
- int cflag=1; /* check file system before test */
- char filename[1024];
/* parameters setup */
- family_size_def = FAMILY_SIZE;
while (--ac > 0){
av++;
@@ -538,6 +774,11 @@ main (int ac, char **av)
}
}
+ /* check sparse file support unless cflag is not set. */
+ if (cflag)
+ sparse_support = is_sparse();
+
+
/* Choose random # seed */
seed = (unsigned long)HDtime(NULL);
#ifdef QAK
@@ -546,113 +787,18 @@ HDfprintf(stderr, "Random # seed was: %lu\n", seed);
#endif /* QAK */
HDsrandom(seed);
- /* Reset library */
- h5_reset();
- fapl = h5_fileaccess();
-
- /* Test big file with the family driver */
- puts("Testing big file with the Family Driver ");
- if (H5FD_FAMILY!=H5Pget_driver(fapl)) {
- HDfprintf(stdout,
- "Changing file drivers to the family driver, %Hu bytes each\n",
- family_size_def);
- if (H5Pset_fapl_family(fapl, family_size_def, H5P_DEFAULT) < 0) goto error;
- } else if (H5Pget_fapl_family(fapl, &family_size, NULL) < 0) {
+/*=================================================*/
+ if (testvfd(FAMILY_VFD) != 0)
+ goto error;
+ if (testvfd(SEC2_VFD) != 0)
+ goto error;
+ if (testvfd(STDIO_VFD) != 0)
goto error;
- } else if (family_size!=family_size_def) {
- HDfprintf(stdout, "Changing family member size from %Hu to %Hu\n",
- family_size, family_size_def);
- if (H5Pset_fapl_family(fapl, family_size_def, H5P_DEFAULT) < 0)
- goto error;
- }
-
- if (cflag){
- /*
- * We shouldn't run this test if the file system doesn't support holes
- * because we would generate multi-gigabyte files.
- */
- puts("Checking if file system is adequate for this test...");
- if (sizeof(long long)<8 || 0==GB8LL) {
- puts("Test skipped because sizeof(long long) is too small. This");
- puts("hardware apparently doesn't support 64-bit integer types.");
- usage();
- goto quit;
- }
- if (!is_sparse()) {
- puts("Test skipped because file system does not support holes.");
- usage();
- goto quit;
- }
- if (!enough_room(fapl)) {
- puts("Test skipped because of quota (file size or num open files).");
- usage();
- goto quit;
- }
- }
-
- /* Do the test with the Family Driver */
- h5_fixname(FILENAME[0], fapl, filename, sizeof filename);
-
- if (writer(filename, fapl, WRT_N)) goto error;
- if (reader(filename, fapl)) goto error;
-
- puts("Test passed with the Family Driver.");
-
- /*
- * We shouldn't run this test if the file system doesn't support big files
- * because we would generate multi-gigabyte files.
- */
- puts("\nChecking if file system supports big files...");
- if (!supports_big()) {
- puts("Tests for sec2 and stdio are skipped because file system does not support big files.");
- usage();
- goto quit;
- }
-
- /* Clean up the test file */
- if (h5_cleanup(FILENAME, fapl)) remove(DNAME);
-
- /* Test big file with the SEC2 driver */
- puts("Testing big file with the SEC2 Driver ");
-
- fapl = h5_fileaccess();
- if(H5Pset_fapl_sec2(fapl) < 0)
-
- HDmemset(filename, 0, sizeof(filename));
- h5_fixname(FILENAME[1], fapl, filename, sizeof filename);
-
- if (writer(filename, fapl, WRT_N)) goto error;
- if (reader(filename, fapl)) goto error;
-
- puts("Test passed with the SEC2 Driver.");
-
-#ifdef H5_HAVE_FSEEKO
- /* Clean up the test file */
- if (h5_cleanup(FILENAME, fapl)) remove(DNAME);
-
- /* Test big file with the STDIO driver only if fseeko is supported,
- * because the OFFSET parameter of fseek has the type LONG, not big
- * enough to support big files. */
- puts("\nTesting big file with the STDIO Driver ");
-
- fapl = h5_fileaccess();
- if(H5Pset_fapl_stdio(fapl) < 0)
-
- HDmemset(filename, 0, sizeof(filename));
- h5_fixname(FILENAME[2], fapl, filename, sizeof filename);
-
- if (writer(filename, fapl, WRT_N)) goto error;
- if (reader(filename, fapl)) goto error;
- puts("Test passed with the STDIO Driver.");
-#endif
-quit:
/* End with normal exit code */
- if (h5_cleanup(FILENAME, fapl)) remove(DNAME);
return 0;
error:
- if (fapl>=0) H5Pclose(fapl);
puts("*** TEST FAILED ***");
return 1;
}
diff --git a/test/bittests.c b/test/bittests.c
index e435d6c..f063cee 100644
--- a/test/bittests.c
+++ b/test/bittests.c
@@ -908,41 +908,36 @@ test_clear (void)
* Programmer: Robb Matzke
* Tuesday, June 16, 1998
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
int
-main (void)
+main(void)
{
- int nerrors=0;
+ int nerrors = 0;
/*
- * Open the library explicitly for thread-safe builds, so per-thread
- * things are initialized correctly.
+ * Open the library explicitly.
*/
-#ifdef H5_HAVE_THREADSAFE
H5open();
-#endif /* H5_HAVE_THREADSAFE */
-
- nerrors += test_find ()<0?1:0;
- nerrors += test_set ()<0?1:0;
- nerrors += test_clear()<0?1:0;
- nerrors += test_copy ()<0?1:0;
- nerrors += test_shift()<0?1:0;
- nerrors += test_increment ()<0?1:0;
- nerrors += test_decrement ()<0?1:0;
- nerrors += test_negate ()<0?1:0;
-
- if (nerrors) {
+
+ nerrors += test_find() < 0 ? 1 : 0;
+ nerrors += test_set() < 0 ? 1 : 0;
+ nerrors += test_clear() < 0 ? 1 : 0;
+ nerrors += test_copy() < 0 ? 1 : 0;
+ nerrors += test_shift() < 0 ? 1 : 0;
+ nerrors += test_increment() < 0 ? 1 : 0;
+ nerrors += test_decrement() < 0 ? 1 : 0;
+ nerrors += test_negate() < 0 ? 1 : 0;
+
+ if(nerrors) {
printf("***** %u FAILURE%s! *****\n",
- nerrors, 1==nerrors?"":"S");
+ nerrors, 1 == nerrors ? "" : "S");
exit(1);
}
printf("All bit tests passed.\n");
-#ifdef H5_HAVE_THREADSAFE
H5close();
-#endif /* H5_HAVE_THREADSAFE */
+
return 0;
}
+
diff --git a/test/cross_read.c b/test/cross_read.c
index 6b2badf..6588031 100755
--- a/test/cross_read.c
+++ b/test/cross_read.c
@@ -17,8 +17,8 @@
* Programmer: Raymond Lu <slu@ncsa.uiuc.edu>
* Thursday, March 23, 2006
*
- * Purpose: Check if floating-point data created on OpenVMS (VAX type), Solaris,
- * and Linux machines can be read on the machine running this test.
+ * Purpose: Check if floating-point data created on OpenVMS, big-endian, and
+ * little-endian machines can be read on the machine running this test.
*/
#include "h5test.h"
@@ -31,21 +31,63 @@ const char *FILENAME[] = {
NULL
};
-#define DATASETNAME "Array"
-#define NX 5 /* output buffer dimensions */
-#define NY 6
-#define RANK 2
+#define DATASETNAME "Array"
+#define DATASETNAME2 "Scale_offset_float_data_le"
+#define DATASETNAME3 "Scale_offset_float_data_be"
+#define DATASETNAME4 "Scale_offset_double_data_le"
+#define DATASETNAME5 "Scale_offset_double_data_be"
+#define DATASETNAME6 "Scale_offset_char_data_le"
+#define DATASETNAME7 "Scale_offset_char_data_be"
+#define DATASETNAME8 "Scale_offset_short_data_le"
+#define DATASETNAME9 "Scale_offset_short_data_be"
+#define DATASETNAME10 "Scale_offset_int_data_le"
+#define DATASETNAME11 "Scale_offset_int_data_be"
+#define DATASETNAME12 "Scale_offset_long_long_data_le"
+#define DATASETNAME13 "Scale_offset_long_long_data_be"
+#define NX 6
+#define NY 6
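+/* (Each dataset read below is (NX+1) x NY = 7 x 6 elements; the extra row is
+ * filled with negative values: -2.2 for the floating-point datasets and -2
+ * for the integer datasets) */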
+
+/*-------------------------------------------------------------------------
+ * Function: read_data
+ *
+ * Purpose: Read data from a data file.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Raymond Lu
+ * 21 January 2011
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
static int read_data(char *fname)
{
const char *pathname = H5_get_srcdir_filename(fname); /* Corrected test file name */
hid_t file, dataset; /* handles */
- hid_t datatype;
- hid_t dt;
- double data_in[NX][NY]; /* input buffer */
- double data_out[NX][NY]; /* output buffer */
+ double data_in[NX+1][NY]; /* input buffer */
+ double data_out[NX+1][NY]; /* output buffer */
+ long long int_data_in[NX+1][NY]; /* input buffer */
+ long long int_data_out[NX+1][NY]; /* output buffer */
int i, j;
unsigned nerrors = 0;
+ const char *not_supported= " Scaleoffset filter is not enabled.";
+
+ /*
+ * Open the file.
+ */
+ if((file = H5Fopen(pathname, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ TESTING("regular dataset");
+
+ /*
+ * Open the regular dataset.
+ */
+ if((dataset = H5Dopen2(file, DATASETNAME, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
/*
* Data and output buffer initialization.
@@ -56,42 +98,277 @@ static int read_data(char *fname)
data_out[j][i] = 0;
}
}
+ for (i = 0; i < NY; i++) {
+ data_in[NX][i] = -2.2;
+ data_out[NX][i] = 0;
+ }
/*
* 0 1 2 3 4 5
* 1 2 3 4 5 6
* 2 3 4 5 6 7
* 3 4 5 6 7 8
* 4 5 6 7 8 9
+ * 5 6 7 8 9 10
+ * -2.2 -2.2 -2.2 -2.2 -2.2 -2.2
*/
/*
- * Open the file and the dataset.
+ * Read data from hyperslab in the file into the hyperslab in
+ * memory and display.
*/
- if((file = H5Fopen(pathname, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
+ if(H5Dread(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ data_out) < 0)
TEST_ERROR;
- if((dataset = H5Dopen2(file, DATASETNAME, H5P_DEFAULT)) < 0)
+
+ /* Check results */
+ for (j=0; j<(NX+1); j++) {
+ for (i=0; i<NY; i++) {
+ /* if (data_out[j][i] != data_in[j][i]) { */
+ if (!FLT_ABS_EQUAL(data_out[j][i], data_in[j][i])) {
+ if (!nerrors++) {
+ H5_FAILED();
+ printf("element [%d][%d] is %g but should have been %g\n",
+ j, i, data_out[j][i], data_in[j][i]);
+ }
+ }
+ }
+ }
+
+ /*
+ * Close/release resources.
+ */
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /* Failure */
+ if (nerrors) {
+ printf("total of %d errors out of %d elements\n", nerrors, NX*NY);
+ return 1;
+ }
+
+ PASSED();
+
+ TESTING("dataset of LE FLOAT with scale-offset filter");
+
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+ /*
+ * Open the dataset with scale-offset filter.
+ */
+ if((dataset = H5Dopen2(file, DATASETNAME2, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /*
+ * Data and output buffer initialization.
+ */
+ for (j = 0; j < NX; j++) {
+ for (i = 0; i < NY; i++) {
+ data_in[j][i] = ((double)(i + j + 1))/3;
+ data_out[j][i] = 0;
+ }
+ }
+ for (i = 0; i < NY; i++) {
+ data_in[NX][i] = -2.2;
+ data_out[NX][i] = 0;
+ }
+
+ /*
+ * Read data from hyperslab in the file into the hyperslab in
+ * memory and display.
+ */
+ if(H5Dread(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ data_out) < 0)
+ TEST_ERROR;
+
+ /* Check results */
+ for (j=0; j<(NX+1); j++) {
+ for (i=0; i<NY; i++) {
+ if (!DBL_REL_EQUAL(data_out[j][i], data_in[j][i], 0.001)) {
+ if (!nerrors++) {
+ H5_FAILED();
+ printf("element [%d][%d] is %g but should have been %g\n",
+ j, i, data_out[j][i], data_in[j][i]);
+ }
+ }
+ }
+ }
+
+ /*
+ * Close/release resources.
+ */
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /* Failure */
+ if (nerrors) {
+ printf("total of %d errors out of %d elements\n", nerrors, NX*NY);
+ return 1;
+ }
+
+ PASSED();
+#else /*H5_HAVE_FILTER_SCALEOFFSET*/
+ SKIPPED();
+ puts(not_supported);
+#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
+
+ TESTING("dataset of BE FLOAT with scale-offset filter");
+
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+ /*
+ * Open the dataset with scale-offset filter.
+ */
+ if((dataset = H5Dopen2(file, DATASETNAME3, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /*
+ * Data and output buffer initialization.
+ */
+ for (j = 0; j < NX; j++) {
+ for (i = 0; i < NY; i++) {
+ data_in[j][i] = ((double)(i + j + 1))/3;
+ data_out[j][i] = 0;
+ }
+ }
+ for (i = 0; i < NY; i++) {
+ data_in[NX][i] = -2.2;
+ data_out[NX][i] = 0;
+ }
+
+ /*
+ * Read data from hyperslab in the file into the hyperslab in
+ * memory and display.
+ */
+ if(H5Dread(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ data_out) < 0)
+ TEST_ERROR;
+
+ /* Check results */
+ for (j=0; j<(NX+1); j++) {
+ for (i=0; i<NY; i++) {
+ if (!DBL_REL_EQUAL(data_out[j][i], data_in[j][i], 0.001)) {
+ if (!nerrors++) {
+ H5_FAILED();
+ printf("element [%d][%d] is %g but should have been %g\n",
+ j, i, data_out[j][i], data_in[j][i]);
+ }
+ }
+ }
+ }
+
+ /*
+ * Close/release resources.
+ */
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /* Failure */
+ if (nerrors) {
+ printf("total of %d errors out of %d elements\n", nerrors, NX*NY);
+ return 1;
+ }
+
+ PASSED();
+#else /*H5_HAVE_FILTER_SCALEOFFSET*/
+ SKIPPED();
+ puts(not_supported);
+#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
+
+ TESTING("dataset of LE DOUBLE with scale-offset filter");
+
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+ /*
+ * Open the dataset with scale-offset filter.
+ */
+ if((dataset = H5Dopen2(file, DATASETNAME4, H5P_DEFAULT)) < 0)
TEST_ERROR;
/*
- * Get datatype and dataspace handles and then query
- * dataset class, order, size, rank and dimensions.
+ * Data and output buffer initialization.
+ */
+ for (j = 0; j < NX; j++) {
+ for (i = 0; i < NY; i++) {
+ data_in[j][i] = ((double)(i + j + 1))/3;
+ data_out[j][i] = 0;
+ }
+ }
+ for (i = 0; i < NY; i++) {
+ data_in[NX][i] = -2.2;
+ data_out[NX][i] = 0;
+ }
+
+ /*
+ * Read data from hyperslab in the file into the hyperslab in
+ * memory and display.
*/
- if((dt = H5Dget_type(dataset)) < 0) /* datatype handle */
+ if(H5Dread(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ data_out) < 0)
TEST_ERROR;
- if((datatype = H5Tget_native_type(dt, H5T_DIR_DEFAULT)) < 0)
+
+ /* Check results */
+ for (j=0; j<(NX+1); j++) {
+ for (i=0; i<NY; i++) {
+ if (!DBL_REL_EQUAL(data_out[j][i], data_in[j][i], 0.001)) {
+ if (!nerrors++) {
+ H5_FAILED();
+ printf("element [%d][%d] is %g but should have been %g\n",
+ j, i, data_out[j][i], data_in[j][i]);
+ }
+ }
+ }
+ }
+
+ /*
+ * Close/release resources.
+ */
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /* Failure */
+ if (nerrors) {
+ printf("total of %d errors out of %d elements\n", nerrors, NX*NY);
+ return 1;
+ }
+
+ PASSED();
+#else /*H5_HAVE_FILTER_SCALEOFFSET*/
+ SKIPPED();
+ puts(not_supported);
+#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
+
+ TESTING("dataset of BE DOUBLE with scale-offset filter");
+
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+ /*
+ * Open the dataset with scale-offset filter.
+ */
+ if((dataset = H5Dopen2(file, DATASETNAME5, H5P_DEFAULT)) < 0)
TEST_ERROR;
/*
+ * Data and output buffer initialization.
+ */
+ for (j = 0; j < NX; j++) {
+ for (i = 0; i < NY; i++) {
+ data_in[j][i] = ((double)(i + j + 1))/3;
+ data_out[j][i] = 0;
+ }
+ }
+ for (i = 0; i < NY; i++) {
+ data_in[NX][i] = -2.2;
+ data_out[NX][i] = 0;
+ }
+
+ /*
* Read data from hyperslab in the file into the hyperslab in
* memory and display.
*/
- if(H5Dread(dataset, datatype, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_out) < 0)
+ if(H5Dread(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ data_out) < 0)
TEST_ERROR;
/* Check results */
- for (j=0; j<NX; j++) {
+ for (j=0; j<(NX+1); j++) {
for (i=0; i<NY; i++) {
- if (data_out[j][i] != data_in[j][i]) {
+ if (!DBL_REL_EQUAL(data_out[j][i], data_in[j][i], 0.001)) {
if (!nerrors++) {
H5_FAILED();
printf("element [%d][%d] is %g but should have been %g\n",
@@ -104,10 +381,519 @@ static int read_data(char *fname)
/*
* Close/release resources.
*/
- H5Tclose(dt);
- H5Tclose(datatype);
- H5Dclose(dataset);
- H5Fclose(file);
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /* Failure */
+ if (nerrors) {
+ printf("total of %d errors out of %d elements\n", nerrors, NX*NY);
+ return 1;
+ }
+
+ PASSED();
+#else /*H5_HAVE_FILTER_SCALEOFFSET*/
+ SKIPPED();
+ puts(not_supported);
+#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
+
+ TESTING("dataset of LE CHAR with scale-offset filter");
+
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+ /*
+ * Open the dataset with scale-offset filter.
+ */
+ if((dataset = H5Dopen2(file, DATASETNAME6, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /*
+ * Data and output buffer initialization.
+ */
+ for (j = 0; j < NX; j++) {
+ for (i = 0; i < NY; i++) {
+ int_data_in[j][i] = i + j;
+ int_data_out[j][i] = 0;
+ }
+ }
+ for (i = 0; i < NY; i++) {
+ int_data_in[NX][i] = -2;
+ int_data_out[NX][i] = 0;
+ }
+
+ /*
+ * Read data from hyperslab in the file into the hyperslab in
+ * memory and display.
+ */
+ if(H5Dread(dataset, H5T_NATIVE_LLONG, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ int_data_out) < 0)
+ TEST_ERROR;
+
+ /* Check results */
+ for (j=0; j<(NX+1); j++) {
+ for (i=0; i<NY; i++) {
+ if (int_data_out[j][i] != int_data_in[j][i]) {
+ if (!nerrors++) {
+ H5_FAILED();
+ printf("element [%d][%d] is %d but should have been %d\n",
+ j, i, (int)int_data_out[j][i],
+ (int)int_data_in[j][i]);
+ }
+ }
+ }
+ }
+
+ /*
+ * Close/release resources.
+ */
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /* Failure */
+ if (nerrors) {
+ printf("total of %d errors out of %d elements\n", nerrors, NX*NY);
+ return 1;
+ }
+
+ PASSED();
+
+#else /*H5_HAVE_FILTER_SCALEOFFSET*/
+ SKIPPED();
+ puts(not_supported);
+#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
+
+ TESTING("dataset of BE CHAR with scale-offset filter");
+
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+ /*
+ * Open the dataset with scale-offset filter.
+ */
+ if((dataset = H5Dopen2(file, DATASETNAME7, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /*
+ * Data and output buffer initialization.
+ */
+ for (j = 0; j < NX; j++) {
+ for (i = 0; i < NY; i++) {
+ int_data_in[j][i] = i + j;
+ int_data_out[j][i] = 0;
+ }
+ }
+ for (i = 0; i < NY; i++) {
+ int_data_in[NX][i] = -2;
+ int_data_out[NX][i] = 0;
+ }
+
+ /*
+ * Read data from hyperslab in the file into the hyperslab in
+ * memory and display.
+ */
+ if(H5Dread(dataset, H5T_NATIVE_LLONG, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ int_data_out) < 0)
+ TEST_ERROR;
+
+ /* Check results */
+ for (j=0; j<(NX+1); j++) {
+ for (i=0; i<NY; i++) {
+ if (int_data_out[j][i] != int_data_in[j][i]) {
+ if (!nerrors++) {
+ H5_FAILED();
+ printf("element [%d][%d] is %d but should have been %d\n",
+ j, i, (int)int_data_out[j][i],
+ (int)int_data_in[j][i]);
+ }
+ }
+ }
+ }
+
+ /*
+ * Close/release resources.
+ */
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /* Failure */
+ if (nerrors) {
+ printf("total of %d errors out of %d elements\n", nerrors, NX*NY);
+ return 1;
+ }
+
+ PASSED();
+
+#else /*H5_HAVE_FILTER_SCALEOFFSET*/
+ SKIPPED();
+ puts(not_supported);
+#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
+
+ TESTING("dataset of LE SHORT with scale-offset filter");
+
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+ /*
+ * Open the dataset with scale-offset filter.
+ */
+ if((dataset = H5Dopen2(file, DATASETNAME8, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /*
+ * Data and output buffer initialization.
+ */
+ for (j = 0; j < NX; j++) {
+ for (i = 0; i < NY; i++) {
+ int_data_in[j][i] = i + j;
+ int_data_out[j][i] = 0;
+ }
+ }
+ for (i = 0; i < NY; i++) {
+ int_data_in[NX][i] = -2;
+ int_data_out[NX][i] = 0;
+ }
+
+ /*
+ * Read data from hyperslab in the file into the hyperslab in
+ * memory and display.
+ */
+ if(H5Dread(dataset, H5T_NATIVE_LLONG, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ int_data_out) < 0)
+ TEST_ERROR;
+
+ /* Check results */
+ for (j=0; j<(NX+1); j++) {
+ for (i=0; i<NY; i++) {
+ if (int_data_out[j][i] != int_data_in[j][i]) {
+ if (!nerrors++) {
+ H5_FAILED();
+ printf("element [%d][%d] is %d but should have been %d\n",
+ j, i, (int)int_data_out[j][i],
+ (int)int_data_in[j][i]);
+ }
+ }
+ }
+ }
+
+ /*
+ * Close/release resources.
+ */
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /* Failure */
+ if (nerrors) {
+ printf("total of %d errors out of %d elements\n", nerrors, NX*NY);
+ return 1;
+ }
+
+ PASSED();
+
+#else /*H5_HAVE_FILTER_SCALEOFFSET*/
+ SKIPPED();
+ puts(not_supported);
+#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
+
+ TESTING("dataset of BE SHORT with scale-offset filter");
+
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+ /*
+ * Open the dataset with scale-offset filter.
+ */
+ if((dataset = H5Dopen2(file, DATASETNAME9, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /*
+ * Data and output buffer initialization.
+ */
+ for (j = 0; j < NX; j++) {
+ for (i = 0; i < NY; i++) {
+ int_data_in[j][i] = i + j;
+ int_data_out[j][i] = 0;
+ }
+ }
+ for (i = 0; i < NY; i++) {
+ int_data_in[NX][i] = -2;
+ int_data_out[NX][i] = 0;
+ }
+
+ /*
+ * Read data from hyperslab in the file into the hyperslab in
+ * memory and display.
+ */
+ if(H5Dread(dataset, H5T_NATIVE_LLONG, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ int_data_out) < 0)
+ TEST_ERROR;
+
+ /* Check results */
+ for (j=0; j<(NX+1); j++) {
+ for (i=0; i<NY; i++) {
+ if (int_data_out[j][i] != int_data_in[j][i]) {
+ if (!nerrors++) {
+ H5_FAILED();
+ printf("element [%d][%d] is %d but should have been %d\n",
+ j, i, (int)int_data_out[j][i],
+ (int)int_data_in[j][i]);
+ }
+ }
+ }
+ }
+
+ /*
+ * Close/release resources.
+ */
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /* Failure */
+ if (nerrors) {
+ printf("total of %d errors out of %d elements\n", nerrors, NX*NY);
+ return 1;
+ }
+
+ PASSED();
+
+#else /*H5_HAVE_FILTER_SCALEOFFSET*/
+ SKIPPED();
+ puts(not_supported);
+#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
+
+ TESTING("dataset of LE INT with scale-offset filter");
+
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+ /*
+ * Open the dataset with scale-offset filter.
+ */
+ if((dataset = H5Dopen2(file, DATASETNAME10, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /*
+ * Data and output buffer initialization.
+ */
+ for (j = 0; j < NX; j++) {
+ for (i = 0; i < NY; i++) {
+ int_data_in[j][i] = i + j;
+ int_data_out[j][i] = 0;
+ }
+ }
+ for (i = 0; i < NY; i++) {
+ int_data_in[NX][i] = -2;
+ int_data_out[NX][i] = 0;
+ }
+
+ /*
+ * Read data from hyperslab in the file into the hyperslab in
+ * memory and display.
+ */
+ if(H5Dread(dataset, H5T_NATIVE_LLONG, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ int_data_out) < 0)
+ TEST_ERROR;
+
+ /* Check results */
+ for (j=0; j<(NX+1); j++) {
+ for (i=0; i<NY; i++) {
+ if (int_data_out[j][i] != int_data_in[j][i]) {
+ if (!nerrors++) {
+ H5_FAILED();
+ printf("element [%d][%d] is %d but should have been %d\n",
+ j, i, (int)int_data_out[j][i],
+ (int)int_data_in[j][i]);
+ }
+ }
+ }
+ }
+
+ /*
+ * Close/release resources.
+ */
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /* Failure */
+ if (nerrors) {
+ printf("total of %d errors out of %d elements\n", nerrors, NX*NY);
+ return 1;
+ }
+
+ PASSED();
+
+#else /*H5_HAVE_FILTER_SCALEOFFSET*/
+ SKIPPED();
+ puts(not_supported);
+#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
+
+ TESTING("dataset of BE INT with scale-offset filter");
+
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+ /*
+ * Open the dataset with scale-offset filter.
+ */
+ if((dataset = H5Dopen2(file, DATASETNAME11, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /*
+ * Data and output buffer initialization.
+ */
+ for (j = 0; j < NX; j++) {
+ for (i = 0; i < NY; i++) {
+ int_data_in[j][i] = i + j;
+ int_data_out[j][i] = 0;
+ }
+ }
+ for (i = 0; i < NY; i++) {
+ int_data_in[NX][i] = -2;
+ int_data_out[NX][i] = 0;
+ }
+
+ /*
+ * Read data from hyperslab in the file into the hyperslab in
+ * memory and display.
+ */
+ if(H5Dread(dataset, H5T_NATIVE_LLONG, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ int_data_out) < 0)
+ TEST_ERROR;
+
+ /* Check results */
+ for (j=0; j<(NX+1); j++) {
+ for (i=0; i<NY; i++) {
+ if (int_data_out[j][i] != int_data_in[j][i]) {
+ if (!nerrors++) {
+ H5_FAILED();
+ printf("element [%d][%d] is %d but should have been %d\n",
+ j, i, (int)int_data_out[j][i],
+ (int)int_data_in[j][i]);
+ }
+ }
+ }
+ }
+
+ /*
+ * Close/release resources.
+ */
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /* Failure */
+ if (nerrors) {
+ printf("total of %d errors out of %d elements\n", nerrors, NX*NY);
+ return 1;
+ }
+
+ PASSED();
+
+#else /*H5_HAVE_FILTER_SCALEOFFSET*/
+ SKIPPED();
+ puts(not_supported);
+#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
+
+ TESTING("dataset of LE LONG LONG with scale-offset filter");
+
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+ /*
+ * Open the dataset with scale-offset filter.
+ */
+ if((dataset = H5Dopen2(file, DATASETNAME12, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /*
+ * Data and output buffer initialization.
+ */
+ for (j = 0; j < NX; j++) {
+ for (i = 0; i < NY; i++) {
+ int_data_in[j][i] = i + j;
+ int_data_out[j][i] = 0;
+ }
+ }
+ for (i = 0; i < NY; i++) {
+ int_data_in[NX][i] = -2;
+ int_data_out[NX][i] = 0;
+ }
+
+ /*
+ * Read data from hyperslab in the file into the hyperslab in
+ * memory and display.
+ */
+ if(H5Dread(dataset, H5T_NATIVE_LLONG, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ int_data_out) < 0)
+ TEST_ERROR;
+
+ /* Check results */
+ for (j=0; j<(NX+1); j++) {
+ for (i=0; i<NY; i++) {
+ if (int_data_out[j][i] != int_data_in[j][i]) {
+ if (!nerrors++) {
+ H5_FAILED();
+ printf("element [%d][%d] is %d but should have been %d\n",
+ j, i, (int)int_data_out[j][i],
+ (int)int_data_in[j][i]);
+ }
+ }
+ }
+ }
+
+ /*
+ * Close/release resources.
+ */
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /* Failure */
+ if (nerrors) {
+ printf("total of %d errors out of %d elements\n", nerrors, NX*NY);
+ return 1;
+ }
+
+ PASSED();
+
+#else /*H5_HAVE_FILTER_SCALEOFFSET*/
+ SKIPPED();
+ puts(not_supported);
+#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
+
+ TESTING("dataset of BE LONG LONG with scale-offset filter");
+
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+ /*
+ * Open the dataset with scale-offset filter.
+ */
+ if((dataset = H5Dopen2(file, DATASETNAME13, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /*
+ * Data and output buffer initialization.
+ */
+ for (j = 0; j < NX; j++) {
+ for (i = 0; i < NY; i++) {
+ int_data_in[j][i] = i + j;
+ int_data_out[j][i] = 0;
+ }
+ }
+ for (i = 0; i < NY; i++) {
+ int_data_in[NX][i] = -2;
+ int_data_out[NX][i] = 0;
+ }
+
+ /*
+ * Read data from hyperslab in the file into the hyperslab in
+ * memory and display.
+ */
+ if(H5Dread(dataset, H5T_NATIVE_LLONG, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ int_data_out) < 0)
+ TEST_ERROR;
+
+ /* Check results */
+ for (j=0; j<(NX+1); j++) {
+ for (i=0; i<NY; i++) {
+ if (int_data_out[j][i] != int_data_in[j][i]) {
+ if (!nerrors++) {
+ H5_FAILED();
+ printf("element [%d][%d] is %d but should have been %d\n",
+ j, i, (int)int_data_out[j][i],
+ (int)int_data_in[j][i]);
+ }
+ }
+ }
+ }
+
+ /*
+ * Close/release resources.
+ */
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
/* Failure */
if (nerrors) {
@@ -116,6 +902,14 @@ static int read_data(char *fname)
}
PASSED();
+
+#else /*H5_HAVE_FILTER_SCALEOFFSET*/
+ SKIPPED();
+ puts(not_supported);
+#endif /*H5_HAVE_FILTER_SCALEOFFSET*/
+
+ if(H5Fclose(file))
+ TEST_ERROR
return 0;
error:
@@ -125,6 +919,20 @@ error:
return 1;
}
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: Tests reading datasets created on other platforms (OpenVMS, Linux, Solaris)
+ *
+ * Return: Success: exit(0)
+ * Failure: exit(1)
+ *
+ * Programmer: Raymond Lu
+ * Tuesday, Sept 24, 2002
+ *
+ *-------------------------------------------------------------------------
+ */
int main(void)
{
char filename[1024];
@@ -132,15 +940,15 @@ int main(void)
h5_reset();
- TESTING("reading data created on OpenVMS");
+ puts("Testing reading data created on OpenVMS");
h5_fixname(FILENAME[0], H5P_DEFAULT, filename, sizeof filename);
nerrors += read_data(filename);
- TESTING("reading data created on Linux");
+ puts("Testing reading data created on Linux");
h5_fixname(FILENAME[1], H5P_DEFAULT, filename, sizeof filename);
nerrors += read_data(filename);
- TESTING("reading data created on Solaris");
+ puts("Testing reading data created on Solaris");
h5_fixname(FILENAME[2], H5P_DEFAULT, filename, sizeof filename);
nerrors += read_data(filename);
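
The checks above compare doubles with DBL_REL_EQUAL, a relative-tolerance test, because the scale-offset filter is lossy for floating-point data. A minimal standalone sketch of that kind of check follows; the real macro lives in the HDF5 test headers, so the function name, tolerance, and sample values here are only illustrative.

    #include <math.h>
    #include <stdio.h>

    /* Illustrative relative-tolerance comparison; only a sketch of what the
     * DBL_REL_EQUAL macro in the test headers does. */
    static int dbl_rel_equal(double x, double y, double tol)
    {
        if (y == 0.0)
            return fabs(x) <= tol;           /* avoid dividing by zero */
        return fabs((x - y) / y) <= tol;     /* relative difference */
    }

    int main(void)
    {
        double expected = 7.0 / 3.0;         /* value originally written */
        double stored   = 2.33333;           /* value read back after scale-offset */

        printf("within 0.1%% of each other: %s\n",
               dbl_rel_equal(stored, expected, 0.001) ? "yes" : "no");
        return 0;
    }
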
diff --git a/test/dangle.c b/test/dangle.c
index 2f8a67e..dfa73ba 100644
--- a/test/dangle.c
+++ b/test/dangle.c
@@ -513,6 +513,132 @@ error:
/*-------------------------------------------------------------------------
+ * Function: test_dangle_force
+ *
+ * Purpose: Shut down all dangling IDs with generic file & ID routines,
+ * instead of letting the library shut them down.
+ *
+ * Return: Success: zero
+ * Failure: non-zero
+ *
+ * Programmer: Quincey Koziol
+ * Friday, October 29, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_dangle_force(void)
+{
+ char filename[1024];
+ hid_t fid; /* File ID */
+ hid_t gid, gid2; /* Group IDs */
+ hid_t dsid, dsid2; /* Dataset IDs */
+ hid_t sid; /* Dataspace ID */
+ hid_t aid, aid2; /* Attribute IDs */
+ hid_t tid, tid2; /* Named datatype IDs */
+ ssize_t count; /* Count of open objects */
+ hid_t *objs = NULL; /* Pointer to list of open objects */
+ size_t u; /* Local index variable */
+
+ TESTING("force dangling IDs to close, from API routines");
+
+ h5_fixname(FILENAME[0], H5P_DEFAULT, filename, sizeof filename);
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create a dataspace for the dataset & attribute to use */
+ if((sid = H5Screate(H5S_SCALAR)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create a dataset */
+ if((dsid = H5Dcreate2(fid, DSETNAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Re-open the dataset */
+ if((dsid2 = H5Dopen2(fid, DSETNAME, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create an attribute on the dataset */
+ if((aid = H5Acreate2(dsid, ATTRNAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Re-open the attribute */
+ if((aid2 = H5Aopen(dsid, ATTRNAME, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the dataspace ID */
+ if(H5Sclose(sid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open a group ID */
+ if((gid = H5Gopen2(fid, "/", H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open group again */
+ if((gid2 = H5Gopen2(fid, "/", H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create a named datatype */
+ if((tid = H5Tcopy(H5T_NATIVE_INT)) < 0)
+ FAIL_STACK_ERROR
+ if(H5Tcommit2(fid, TYPENAME, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0)
+ FAIL_STACK_ERROR
+
+ /* Re-open the named datatype */
+ if((tid2 = H5Topen2(fid, TYPENAME, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Increment the ref count on all the "second" objects */
+ if(H5Iinc_ref(dsid2) < 0)
+ FAIL_STACK_ERROR
+ if(H5Iinc_ref(aid2) < 0)
+ FAIL_STACK_ERROR
+ if(H5Iinc_ref(gid2) < 0)
+ FAIL_STACK_ERROR
+ if(H5Iinc_ref(aid2) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get the number of open objects */
+ if((count = H5Fget_obj_count(H5F_OBJ_ALL, H5F_OBJ_ALL)) < 0)
+ FAIL_STACK_ERROR
+ if(0 == count)
+ TEST_ERROR;
+
+ /* Allocate the array of object IDs */
+ objs = (hid_t*)HDmalloc(sizeof(hid_t) * (size_t)count);
+
+ /* Get the list of open IDs */
+ if(H5Fget_obj_ids(H5F_OBJ_ALL, H5F_OBJ_ALL, (size_t)count, objs) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close all open IDs */
+ for(u = 0; u < (size_t)count; u++)
+ while(H5Iget_type(objs[u]) != H5I_BADID && H5Iget_ref(objs[u]) > 0)
+ H5Idec_ref(objs[u]);
+
+ /* Get the number of open objects */
+ if((count = H5Fget_obj_count(H5F_OBJ_ALL, H5F_OBJ_ALL)) < 0)
+ FAIL_STACK_ERROR
+ if(0 != count)
+ TEST_ERROR;
+
+ /* Clean up temporary file */
+ HDremove(filename);
+
+ /* Release object ID array */
+ HDfree(objs);
+
+ PASSED();
+ return 0;
+
+error:
+ if(objs)
+ HDfree(objs);
+ return 1;
+}
+
+
+/*-------------------------------------------------------------------------
* Function: main
*
* Purpose: Executes dangling ID tests
@@ -556,6 +682,9 @@ main(void)
nerrors += test_dangle_datatype2(H5F_CLOSE_STRONG);
nerrors += test_dangle_attribute(H5F_CLOSE_STRONG);
+ /* Close open IDs "the hard way" */
+ nerrors += test_dangle_force();
+
/* Check for errors */
if (nerrors)
goto error;
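
test_dangle_force() above exercises the generic ID routines instead of relying on file-close semantics: it asks the library for every ID still open, then drives each reference count to zero. A condensed sketch of that pattern, with error checking trimmed and a made-up file name:

    #include <stdlib.h>
    #include "hdf5.h"

    /* Sketch of the force-close pattern: enumerate all IDs the library still
     * holds open (files, datasets, groups, named datatypes, attributes) and
     * decrement each one until it disappears.  Error checking omitted. */
    static void force_close_all(void)
    {
        hid_t   fid = H5Fcreate("force_close_demo.h5", H5F_ACC_TRUNC,
                                H5P_DEFAULT, H5P_DEFAULT);
        hid_t   sid = H5Screate(H5S_SCALAR);
        hid_t   did = H5Dcreate2(fid, "leaky", H5T_NATIVE_INT, sid,
                                 H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
        ssize_t count;
        hid_t  *objs;
        size_t  u;

        H5Sclose(sid);      /* dataspace is no longer needed */
        (void)did;          /* dataset intentionally left dangling */

        /* How many IDs does the library still have open across all files? */
        count = H5Fget_obj_count(H5F_OBJ_ALL, H5F_OBJ_ALL);
        objs  = (hid_t *)malloc(sizeof(hid_t) * (size_t)count);
        H5Fget_obj_ids(H5F_OBJ_ALL, H5F_OBJ_ALL, (size_t)count, objs);

        /* Close every ID, whatever its type and however many refs it holds */
        for (u = 0; u < (size_t)count; u++)
            while (H5Iget_type(objs[u]) != H5I_BADID && H5Iget_ref(objs[u]) > 0)
                H5Idec_ref(objs[u]);

        free(objs);
    }
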
diff --git a/test/dsets.c b/test/dsets.c
index 1d831f1..b2b2521 100644
--- a/test/dsets.c
+++ b/test/dsets.c
@@ -53,6 +53,7 @@ const char *FILENAME[] = {
NULL
};
#define FILENAME_BUF_SIZE 1024
+#define KB 1024
#define FILE_DEFLATE_NAME "deflate.h5"
@@ -99,6 +100,8 @@ const char *FILENAME[] = {
#define DSET_NBIT_COMPOUND_NAME "nbit_compound"
#define DSET_NBIT_COMPOUND_NAME_2 "nbit_compound_2"
#define DSET_NBIT_COMPOUND_NAME_3 "nbit_compound_3"
+#define DSET_NBIT_INT_SIZE_NAME "nbit_int_size"
+#define DSET_NBIT_FLT_SIZE_NAME "nbit_flt_size"
#define DSET_SCALEOFFSET_INT_NAME "scaleoffset_int"
#define DSET_SCALEOFFSET_INT_NAME_2 "scaleoffset_int_2"
#define DSET_SCALEOFFSET_FLOAT_NAME "scaleoffset_float"
@@ -3933,6 +3936,384 @@ error:
/*-------------------------------------------------------------------------
+ * Function: test_nbit_int_size
+ *
+ * Purpose: Tests the correct size of the integer datatype for nbit filter
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Raymond Lu
+ * 19 November 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_nbit_int_size(hid_t file)
+{
+#ifdef H5_HAVE_FILTER_NBIT
+ hid_t dataspace, dataset, datatype, mem_datatype, dset_create_props;
+ hsize_t dims[2], chunk_size[2];
+ hsize_t dset_size = 0;
+ int orig_data[DSET_DIM1][DSET_DIM2];
+ int i, j;
+ size_t precision, offset;
+#else /* H5_HAVE_FILTER_NBIT */
+ const char *not_supported= " Nbit is not enabled.";
+#endif /* H5_HAVE_FILTER_NBIT */
+
+ TESTING(" nbit integer dataset size");
+#ifdef H5_HAVE_FILTER_NBIT
+
+ /* Define dataset datatype (integer), and set precision, offset */
+ if((datatype = H5Tcopy(H5T_NATIVE_INT)) < 0) {
+ H5_FAILED();
+ printf(" line %d: H5Tcopy failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ precision = 16; /* precision includes sign bit */
+ if(H5Tset_precision(datatype,precision)<0) {
+ H5_FAILED();
+ printf(" line %d: H5Tset_precision failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ offset = 8;
+ if(H5Tset_offset(datatype,offset)<0) {
+ H5_FAILED();
+ printf(" line %d: H5Tset_offset failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ /* Copy to memory datatype */
+ if((mem_datatype = H5Tcopy(datatype)) < 0) {
+ H5_FAILED();
+ printf(" line %d: H5Tcopy failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ /* Set order of dataset datatype */
+ if(H5Tset_order(datatype, H5T_ORDER_BE)<0) {
+ H5_FAILED();
+ printf(" line %d: H5Tset_order failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ if(H5Tset_size(datatype, 4)<0) {
+ H5_FAILED();
+ printf(" line %d: H5Tset_size failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ /* Initialize data buffer with random data within the correct range
+ * corresponding to the memory datatype's precision and offset.
+ */
+ for (i=0; i < DSET_DIM1; i++)
+ for (j=0; j < DSET_DIM2; j++)
+ orig_data[i][j] = rand() % (int)pow(2, precision-1) <<offset;
+
+
+ /* Describe the dataspace. */
+ dims[0] = DSET_DIM1;
+ dims[1] = DSET_DIM2;
+ if((dataspace = H5Screate_simple (2, dims, NULL))<0) {
+ H5_FAILED();
+ printf(" line %d: H5Screate_simple failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ /*
+ * Set the dataset creation property list to specify the chunks
+ */
+ chunk_size[0] = DSET_DIM1/10;
+ chunk_size[1] = DSET_DIM2/10;
+ if((dset_create_props = H5Pcreate (H5P_DATASET_CREATE))<0) {
+ H5_FAILED();
+ printf(" line %d: H5Pcreate failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ if(H5Pset_chunk (dset_create_props, 2, chunk_size)<0) {
+ H5_FAILED();
+ printf(" line %d: H5Pset_chunk failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ /*
+ * Set for n-bit compression
+ */
+ if(H5Pset_nbit (dset_create_props)<0) {
+ H5_FAILED();
+ printf(" line %d: H5Pset_nbit failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ /*
+ * Create a new dataset within the file.
+ */
+ if((dataset = H5Dcreate2 (file, DSET_NBIT_INT_SIZE_NAME, datatype,
+ dataspace, H5P_DEFAULT,
+ dset_create_props, H5P_DEFAULT))<0) {
+ H5_FAILED();
+ printf(" line %d: H5Dcreate2 failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ /*
+ * Write the array to the file.
+ */
+ if(H5Dwrite (dataset, mem_datatype, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, orig_data)<0) {
+ H5_FAILED();
+ printf(" Line %d: H5Dwrite failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ /*
+ * Get the precision of the data type
+ */
+ if((precision = H5Tget_precision(datatype)) == 0) {
+ H5_FAILED();
+ printf(" Line %d: wrong precision size: %u\n",__LINE__, (unsigned)precision);
+ goto error;
+ } /* end if */
+
+ /*
+ * The size of the dataset after compression should be around 2 * DSET_DIM1 * DSET_DIM2 bytes
+ */
+ if((dset_size = H5Dget_storage_size(dataset)) < DSET_DIM1*DSET_DIM2*(precision/8) ||
+ dset_size > DSET_DIM1*DSET_DIM2*(precision/8) + 1*KB) {
+ H5_FAILED();
+ printf(" Line %d: wrong dataset size: %lu\n",__LINE__, (unsigned long)dset_size);
+ goto error;
+ } /* end if */
+
+ H5Tclose (datatype);
+ H5Tclose (mem_datatype);
+ H5Dclose (dataset);
+ H5Sclose (dataspace);
+ H5Pclose (dset_create_props);
+
+ PASSED();
+#else
+ SKIPPED();
+ puts(not_supported);
+#endif
+
+ return 0;
+error:
+ return -1;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_nbit_flt_size
+ *
+ * Purpose: Tests the correct size of the floating-point datatype for
+ * the nbit filter
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Raymond Lu
+ * 19 November 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_nbit_flt_size(hid_t file)
+{
+#ifdef H5_HAVE_FILTER_NBIT
+ hid_t dataspace, dataset, datatype, dset_create_props;
+ hsize_t dims[2], chunk_size[2];
+ hsize_t dset_size = 0;
+ float orig_data[DSET_DIM1][DSET_DIM2];
+ int i, j;
+ size_t precision, offset;
+ size_t spos, epos, esize, mpos, msize;
+#else /* H5_HAVE_FILTER_NBIT */
+ const char *not_supported= " Nbit is not enabled.";
+#endif /* H5_HAVE_FILTER_NBIT */
+
+ TESTING(" nbit floating-number dataset size");
+#ifdef H5_HAVE_FILTER_NBIT
+
+ /* Define floating-point type for dataset
+ *-------------------------------------------------------------------
+ * size=4 byte, precision=16 bits, offset=8 bits,
+ * mantissa size=9 bits, mantissa position=8,
+ * exponent size=6 bits, exponent position=17,
+ * exponent bias=31.
+ * It can be illustrated in little-endian order as:
+ * (S - sign bit, E - exponent bit, M - mantissa bit,
+ * ? - padding bit)
+ *
+ * 3 2 1 0
+ * ???????? SEEEEEEM MMMMMMMM ????????
+ *
+ * To create a new floating-point type, the following
+ * properties must be set in the order of
+ * set fields -> set offset -> set precision -> set size.
+ * All these properties must be set before the type can function.
+ * Other properties can be set at any time. The derived type's size
+ * cannot be expanded beyond the original size but can be decreased.
+ * There should be no holes among the significant bits. The exponent
+ * bias is usually set to 2^(n-1)-1, where n is the exponent size.
+ *-------------------------------------------------------------------*/
+ if((datatype = H5Tcopy(H5T_IEEE_F32LE)) < 0) {
+ H5_FAILED();
+ printf(" line %d: H5Tcopy failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ msize = 9;
+ spos = 23;
+ epos = 17;
+ esize = 6;
+ mpos = 8;
+ offset = 8;
+ precision = 16;
+
+ if(H5Tset_fields(datatype, spos, epos, esize, mpos, msize)<0) {
+ H5_FAILED();
+ printf(" line %d: H5Tset_fields failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ if(H5Tset_offset(datatype,offset)<0) {
+ H5_FAILED();
+ printf(" line %d: H5Tset_offset failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ if(H5Tset_precision(datatype,precision)<0) {
+ H5_FAILED();
+ printf(" line %d: H5Tset_precision failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ if(H5Tset_size(datatype, 4)<0) {
+ H5_FAILED();
+ printf(" line %d: H5Tset_size failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ /* Set order of dataset datatype */
+ if(H5Tset_order(datatype, H5T_ORDER_BE)<0) {
+ H5_FAILED();
+ printf(" line %d: H5Tset_order failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ if(H5Tset_ebias(datatype, 31)<0) {
+ H5_FAILED();
+ printf(" line %d: H5Tset_ebias failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ /*
+ * Initialize data buffer with random data
+ */
+ for (i=0; i < DSET_DIM1; i++)
+ for (j=0; j < DSET_DIM2; j++)
+ orig_data[i][j] = (rand() % 1234567) / 2;
+
+
+ /* Describe the dataspace. */
+ dims[0] = DSET_DIM1;
+ dims[1] = DSET_DIM2;
+ if((dataspace = H5Screate_simple (2, dims, NULL))<0) {
+ H5_FAILED();
+ printf(" line %d: H5Screate_simple failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ /*
+ * Set the dataset creation property list to specify the chunks
+ */
+ chunk_size[0] = DSET_DIM1/10;
+ chunk_size[1] = DSET_DIM2/10;
+ if((dset_create_props = H5Pcreate (H5P_DATASET_CREATE))<0) {
+ H5_FAILED();
+ printf(" line %d: H5Pcreate failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ if(H5Pset_chunk (dset_create_props, 2, chunk_size)<0) {
+ H5_FAILED();
+ printf(" line %d: H5Pset_chunk failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ /*
+ * Set for n-bit compression
+ */
+ if(H5Pset_nbit (dset_create_props)<0) {
+ H5_FAILED();
+ printf(" line %d: H5Pset_nbit failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ /*
+ * Create a new dataset within the file.
+ */
+ if((dataset = H5Dcreate2 (file, DSET_NBIT_FLT_SIZE_NAME, datatype,
+ dataspace, H5P_DEFAULT,
+ dset_create_props, H5P_DEFAULT))<0) {
+ H5_FAILED();
+ printf(" line %d: H5Dcreate2 failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ /*
+ * Write the array to the file.
+ */
+ if(H5Dwrite (dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, orig_data)<0) {
+ H5_FAILED();
+ printf(" Line %d: H5Dwrite failed\n",__LINE__);
+ goto error;
+ } /* end if */
+
+ /*
+ * Get the precision of the data type
+ */
+ if((precision = H5Tget_precision(datatype)) == 0) {
+ H5_FAILED();
+ printf(" Line %d: wrong precision size: %u\n",__LINE__, (unsigned)precision);
+ goto error;
+ } /* end if */
+
+ /*
+ * The size of the dataset after compression should be around 2 * DSET_DIM1 * DSET_DIM2 bytes
+ */
+ if((dset_size = H5Dget_storage_size(dataset)) < DSET_DIM1*DSET_DIM2*(precision/8) ||
+ dset_size > DSET_DIM1*DSET_DIM2*(precision/8) + 1*KB) {
+ H5_FAILED();
+ printf(" Line %d: wrong dataset size: %lu\n",__LINE__, (unsigned long)dset_size);
+ goto error;
+ } /* end if */
+
+ H5Tclose (datatype);
+ H5Dclose (dataset);
+ H5Sclose (dataspace);
+ H5Pclose (dset_create_props);
+
+ PASSED();
+#else
+ SKIPPED();
+ puts(not_supported);
+#endif
+
+ return 0;
+error:
+ return -1;
+}
+
+/*-------------------------------------------------------------------------
* Function: test_scaleoffset_int
*
* Purpose: Tests the integer datatype for scaleoffset filter
@@ -7770,6 +8151,8 @@ main(void)
nerrors += (test_nbit_compound(file) < 0 ? 1 : 0);
nerrors += (test_nbit_compound_2(file) < 0 ? 1 : 0);
nerrors += (test_nbit_compound_3(file) < 0 ? 1 : 0);
+ nerrors += (test_nbit_int_size(file) < 0 ? 1 : 0);
+ nerrors += (test_nbit_flt_size(file) < 0 ? 1 : 0);
nerrors += (test_scaleoffset_int(file) < 0 ? 1 : 0);
nerrors += (test_scaleoffset_int_2(file) < 0 ? 1 : 0);
nerrors += (test_scaleoffset_float(file) < 0 ? 1 : 0);
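
The two new dsets.c tests check that the n-bit filter really shrinks storage to roughly precision/8 bytes per element plus a little per-chunk overhead. A minimal sketch of that setup outside the test harness; the file name, dataset name, and dimensions are invented and error checking is omitted.

    #include <stdio.h>
    #include "hdf5.h"

    #define ROWS 100
    #define COLS 200

    /* Sketch of the size check in test_nbit_int_size(): a 32-bit integer
     * dataset that only uses 16 significant bits should need about 2 bytes
     * per element on disk once the n-bit filter packs it. */
    static void nbit_size_demo(void)
    {
        static int data[ROWS][COLS];                 /* zero-filled buffer */
        hsize_t    dims[2]  = {ROWS, COLS};
        hsize_t    chunk[2] = {ROWS / 10, COLS / 10};
        hid_t      fid, sid, tid, dcpl, did;
        hsize_t    stored;

        fid  = H5Fcreate("nbit_demo.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        sid  = H5Screate_simple(2, dims, NULL);

        tid  = H5Tcopy(H5T_NATIVE_INT);
        H5Tset_precision(tid, 16);                   /* 16 significant bits...  */
        H5Tset_offset(tid, 8);                       /* ...starting at bit 8    */

        dcpl = H5Pcreate(H5P_DATASET_CREATE);
        H5Pset_chunk(dcpl, 2, chunk);
        H5Pset_nbit(dcpl);                           /* enable the n-bit filter */

        did  = H5Dcreate2(fid, "nbit_demo", tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
        H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);

        stored = H5Dget_storage_size(did);           /* expect ~2 * ROWS * COLS */
        printf("stored %lu bytes for %d elements\n",
               (unsigned long)stored, ROWS * COLS);

        H5Dclose(did); H5Pclose(dcpl); H5Tclose(tid); H5Sclose(sid); H5Fclose(fid);
    }
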
diff --git a/test/dt_arith.c b/test/dt_arith.c
index 8d4b65b..078242d 100644
--- a/test/dt_arith.c
+++ b/test/dt_arith.c
@@ -5107,9 +5107,25 @@ run_int_fp_conv(const char *name)
nerrors += test_conv_int_fp(name, TEST_NORMAL, H5T_NATIVE_INT, H5T_NATIVE_LDOUBLE);
nerrors += test_conv_int_fp(name, TEST_NORMAL, H5T_NATIVE_UINT, H5T_NATIVE_LDOUBLE);
#if H5_SIZEOF_LONG!=H5_SIZEOF_INT
+#ifndef H5_LONG_TO_LDOUBLE_SPECIAL
nerrors += test_conv_int_fp(name, TEST_NORMAL, H5T_NATIVE_LONG, H5T_NATIVE_LDOUBLE);
nerrors += test_conv_int_fp(name, TEST_NORMAL, H5T_NATIVE_ULONG, H5T_NATIVE_LDOUBLE);
+#else
+ {
+ char str[256]; /*string */
+
+ sprintf(str, "Testing %s %s -> %s conversions",
+ name, "(unsigned) long", "long double");
+ printf("%-70s", str);
+ SKIPPED();
+#if H5_SIZEOF_LONG_DOUBLE!=0
+ HDputs(" Test skipped due to the special algorithm of hardware conversion.");
+#else
+ HDputs(" Test skipped due to disabled long double.");
#endif
+ }
+#endif
+#endif /* H5_SIZEOF_LONG!=H5_SIZEOF_INT */
#if H5_SIZEOF_LONG_LONG!=H5_SIZEOF_LONG
#if H5_LLONG_TO_LDOUBLE_CORRECT
nerrors += test_conv_int_fp(name, TEST_NORMAL, H5T_NATIVE_LLONG, H5T_NATIVE_LDOUBLE);
@@ -5177,8 +5193,8 @@ run_int_fp_conv(const char *name)
static int
run_fp_int_conv(const char *name)
{
-#ifdef H5_FP_TO_INTEGER_OVERFLOW_WORKS
int nerrors = 0;
+#ifdef H5_FP_TO_INTEGER_OVERFLOW_WORKS
int test_values;
#ifdef H5_VMS
@@ -5272,9 +5288,25 @@ run_fp_int_conv(const char *name)
}
#endif /*H5_LDOUBLE_TO_UINT_ACCURATE*/
#if H5_SIZEOF_LONG!=H5_SIZEOF_INT && H5_SIZEOF_LONG_DOUBLE!=0
+#ifndef H5_LDOUBLE_TO_LONG_SPECIAL
nerrors += test_conv_int_fp(name, test_values, H5T_NATIVE_LDOUBLE, H5T_NATIVE_LONG);
nerrors += test_conv_int_fp(name, test_values, H5T_NATIVE_LDOUBLE, H5T_NATIVE_ULONG);
+#else
+ {
+ char str[256]; /*string */
+
+ sprintf(str, "Testing %s %s -> %s conversions",
+ name, "long double", "(unsigned) long");
+ printf("%-70s", str);
+ SKIPPED();
+#if H5_SIZEOF_LONG_DOUBLE!=0
+ HDputs(" Test skipped due to the special algorithm of hardware conversion.");
+#else
+ HDputs(" Test skipped due to disabled long double.");
+#endif
+ }
#endif
+#endif /*H5_SIZEOF_LONG!=H5_SIZEOF_INT && H5_SIZEOF_LONG_DOUBLE!=0 */
#if H5_SIZEOF_LONG_LONG!=H5_SIZEOF_LONG && H5_SIZEOF_LONG_DOUBLE!=0
#ifdef H5_LDOUBLE_TO_LLONG_ACCURATE
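
When a long/long double conversion path is compiled out, the new dt_arith.c blocks still print the usual "Testing ..." banner plus the reason, so the log keeps one line per conversion. A stripped-down sketch of that reporting pattern; plain stdio stands in for the SKIPPED()/HDputs() framework helpers, and the strings are only examples.

    #include <stdio.h>

    /* Sketch of the skip-reporting pattern: print the banner a real run
     * would print, then the reason the conversion is not tested. */
    static void report_skipped(const char *name, const char *src, const char *dst,
                               const char *reason)
    {
        char str[256];

        sprintf(str, "Testing %s %s -> %s conversions", name, src, dst);
        printf("%-70s -SKIP-\n", str);
        puts(reason);
    }

    int main(void)
    {
        report_skipped("hard", "long double", "(unsigned) long",
                       "   Test skipped due to the special algorithm of hardware conversion.");
        return 0;
    }
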
diff --git a/test/err_compat.c b/test/err_compat.c
index be86a2d..c08e259 100644
--- a/test/err_compat.c
+++ b/test/err_compat.c
@@ -42,20 +42,187 @@ int ipoints2[DIM0][DIM1], icheck2[DIM0][DIM1];
#define DSET_NAME "a_dataset"
#define FAKE_ID -1
-herr_t custom_print_cb(int n, H5E_error1_t *err_desc, void* client_data);
+herr_t custom_print_cb1(int n, H5E_error1_t *err_desc, void* client_data);
+herr_t custom_print_cb2(int n, H5E_error2_t *err_desc, void* client_data);
/*-------------------------------------------------------------------------
- * Function: test_error
+ * Function: user_print1
*
- * Purpose: Test error API functions
+ * Purpose: This function is a user-defined old-style printing function.
+ * This is just a convenience function for H5Ewalk1() with a
+ * function that prints error messages.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Raymond Lu
+ * 4 October 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+user_print1(FILE *stream)
+{
+ /* Customized way to print errors */
+ fprintf(stderr, "\n********* Print error stack in customized way *********\n");
+ if(H5Ewalk1(H5E_WALK_UPWARD, (H5E_walk1_t)custom_print_cb1, stream) < 0)
+ TEST_ERROR;
+
+ return 0;
+
+ error:
+ return -1;
+
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: user_print2
+ *
+ * Purpose: This function is a user-defined new-style printing function.
+ * This is just a convenience function for H5Ewalk2() with a
+ * function that prints error messages.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Raymond Lu
+ * 4 October 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+user_print2(hid_t err_stack, FILE *stream)
+{
+ /* Customized way to print errors */
+ fprintf(stderr, "\n********* Print error stack in customized way *********\n");
+ if(H5Ewalk2(err_stack, H5E_WALK_UPWARD, (H5E_walk2_t)custom_print_cb2, stream) < 0)
+ TEST_ERROR;
+
+ return 0;
+
+ error:
+ return -1;
+
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: custom_print_cb1
+ *
+ * Purpose: Callback function to print error stack in customized way
+ * for H5Ewalk1.
*
* Return: Success: 0
*
* Failure: -1
*
* Programmer: Raymond Lu
- * July 10, 2003
+ * 4 October 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+custom_print_cb1(int n, H5E_error1_t *err_desc, void* client_data)
+{
+ FILE *stream = (FILE *)client_data;
+ char *maj = NULL;
+ char *min = NULL;
+ const int indent = 4;
+
+ if(NULL == (min = H5Eget_minor(err_desc->min_num)))
+ TEST_ERROR;
+
+ if(NULL == (maj = H5Eget_major(err_desc->maj_num)))
+ TEST_ERROR;
+
+ fprintf(stream, "%*serror #%03d: %s in %s(): line %u\n",
+ indent, "", n, err_desc->file_name,
+ err_desc->func_name, err_desc->line);
+
+ fprintf(stream, "%*smajor: %s\n", indent * 2, "", maj);
+ fprintf(stream, "%*sminor: %s\n", indent * 2, "", min);
+
+ HDfree(maj);
+ HDfree(min);
+
+ return 0;
+
+error:
+ if(maj)
+ HDfree(maj);
+ if(min)
+ HDfree(min);
+
+ return -1;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: custom_print_cb2
+ *
+ * Purpose: Callback function to print error stack in customized way
+ * for H5Ewalk2.
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Raymond Lu
+ * 4 October 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+custom_print_cb2(int n, H5E_error2_t *err_desc, void* client_data)
+{
+ FILE *stream = (FILE *)client_data;
+ char *maj = NULL;
+ char *min = NULL;
+ const int indent = 4;
+
+ if(NULL == (min = H5Eget_minor(err_desc->min_num)))
+ TEST_ERROR;
+
+ if(NULL == (maj = H5Eget_major(err_desc->maj_num)))
+ TEST_ERROR;
+
+ fprintf(stream, "%*serror #%03d: %s in %s(): line %u\n",
+ indent, "", n, err_desc->file_name,
+ err_desc->func_name, err_desc->line);
+
+ fprintf(stream, "%*smajor: %s\n", indent * 2, "", maj);
+ fprintf(stream, "%*sminor: %s\n", indent * 2, "", min);
+
+ HDfree(maj);
+ HDfree(min);
+
+ return 0;
+
+error:
+ if(maj)
+ HDfree(maj);
+ if(min)
+ HDfree(min);
+
+ return -1;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_error1
+ *
+ * Purpose: Test the backward compatibility of H5Eset/get_auto.
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Raymond Lu
+ * 17 September 2010
*
*
* Modifications:
@@ -63,13 +230,143 @@ herr_t custom_print_cb(int n, H5E_error1_t *err_desc, void* client_data);
*-------------------------------------------------------------------------
*/
static herr_t
-test_error(hid_t file)
+test_error1(void)
{
hid_t dataset, space;
hsize_t dims[2];
- const char *FUNC_test_error="test_error";
- H5E_auto1_t old_func;
+ H5E_auto1_t old_func1;
+ H5E_auto2_t old_func2;
void *old_data;
+ herr_t ret;
+
+ TESTING("error API H5Eset/get_auto");
+ fprintf(stderr, "\n");
+
+ /* Create the data space */
+ dims[0] = DIM0;
+ dims[1] = DIM1;
+ if ((space = H5Screate_simple(2, dims, NULL))<0) TEST_ERROR;
+
+ /* Use H5Eget_auto2 to query the default printing function. The library
+ *should indicate H5Eprint2 as the default. */
+ if (H5Eget_auto2(H5E_DEFAULT, &old_func2, &old_data)<0)
+ TEST_ERROR;
+ if (old_data != NULL)
+ TEST_ERROR;
+ if (!old_func2 || (H5E_auto2_t)H5Eprint2 != old_func2)
+ TEST_ERROR;
+
+ /* This function sets the default printing function to be H5Eprint2. */
+ if(H5Eset_auto2(H5E_DEFAULT, old_func2, old_data)<0)
+ TEST_ERROR;
+
+ /* Try the printing function. Dataset creation should fail because the file
+ * doesn't exist. */
+ dataset = H5Dcreate2(FAKE_ID, DSET_NAME, H5T_STD_I32BE, space, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT);
+ if(dataset >= 0)
+ TEST_ERROR;
+
+ /* This call should work. It simply returns H5Eprint1. */
+ if((ret = H5Eget_auto1(&old_func1, &old_data))<0)
+ TEST_ERROR;
+ if (old_data != NULL)
+ TEST_ERROR;
+ if (!old_func1 || (H5E_auto1_t)H5Eprint1 != old_func1)
+ TEST_ERROR;
+
+ /* This function changes the old-style printing function to be user_print1. */
+ if(H5Eset_auto1((H5E_auto1_t)user_print1, stderr)<0)
+ TEST_ERROR;
+
+ /* Try the printing function. Dataset creation should fail because the file
+ * doesn't exist. */
+ dataset = H5Dcreate2(FAKE_ID, DSET_NAME, H5T_STD_I32BE, space, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT);
+ if(dataset >= 0)
+ TEST_ERROR;
+
+ /* This call should fail because the test mixes H5Eget_auto2 with H5Eset_auto1.
+ * Once the H5Eset_auto1 is called with a user-defined printing function,
+ * a call to H5Eget_auto2 will fail. But keep in mind the printing function is
+ * user_print1. */
+ if((ret = H5Eget_auto2(H5E_DEFAULT, &old_func2, &old_data))>=0)
+ TEST_ERROR;
+
+ /* This function changes the new-style printing function to be user_print2. */
+ if(H5Eset_auto2(H5E_DEFAULT, (H5E_auto2_t)user_print2, stderr)<0)
+ TEST_ERROR;
+
+ /* Try the printing function. Dataset creation should fail because the file
+ * doesn't exist. */
+ dataset = H5Dcreate2(FAKE_ID, DSET_NAME, H5T_STD_I32BE, space, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT);
+ if(dataset >= 0)
+ TEST_ERROR;
+
+ /* This function changes the new-style printing function back to the default H5Eprint2. */
+ if(H5Eset_auto2(H5E_DEFAULT, (H5E_auto2_t)H5Eprint2, NULL)<0)
+ TEST_ERROR;
+
+ /* This call should work because the H5Eset_auto2 above restored the default printing
+ * function H5Eprint2. It simply returns user_print1. */
+ if((ret = H5Eget_auto1(&old_func1, &old_data))<0)
+ TEST_ERROR;
+ if (old_data != NULL)
+ TEST_ERROR;
+ if (!old_func1 || (H5E_auto1_t)user_print1 != old_func1)
+ TEST_ERROR;
+
+ /* This function changes the old-style printing function back to the default H5Eprint1. */
+ if(H5Eset_auto1((H5E_auto1_t)H5Eprint1, NULL)<0)
+ TEST_ERROR;
+
+ /* This call should work because the H5Eset_auto1 above restored the default printing
+ * function H5Eprint1. It simply returns H5Eprint2. */
+ if((ret = H5Eget_auto2(H5E_DEFAULT, &old_func2, &old_data))<0)
+ TEST_ERROR;
+ if (old_data != NULL)
+ TEST_ERROR;
+ if (!old_func2 || (H5E_auto2_t)H5Eprint2 != old_func2)
+ TEST_ERROR;
+
+ /* Try the printing function. Dataset creation should fail because the file
+ * doesn't exist. */
+ dataset = H5Dcreate2(FAKE_ID, DSET_NAME, H5T_STD_I32BE, space, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT);
+ if(dataset >= 0)
+ TEST_ERROR;
+
+ return 0;
+
+ error:
+ return -1;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_error2
+ *
+ * Purpose: Test error API functions, mainly on H5Epush1.
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Raymond Lu
+ * July 10, 2003
+ *
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_error2(hid_t file)
+{
+ hid_t dataset, space;
+ hsize_t dims[2];
+ const char *FUNC_test_error="test_error2";
TESTING("error API based on data I/O");
fprintf(stderr, "\n");
@@ -92,22 +389,12 @@ test_error(hid_t file)
goto error;
}
- /* Test enabling and disabling default printing */
- if (H5Eget_auto1(&old_func, &old_data)<0)
- TEST_ERROR;
- if (old_data != NULL)
- TEST_ERROR;
- if (!old_func)
- TEST_ERROR;
-#ifdef H5_USE_16_API
- if (old_func != (H5E_auto1_t)H5Eprint1)
- TEST_ERROR;
-#else /* H5_USE_16_API */
- if (old_func != (H5E_auto1_t)H5Eprint2)
- TEST_ERROR;
-#endif /* H5_USE_16_API */
-
- if(H5Eset_auto1(NULL, NULL)<0)
+ /* Disable the library's default printing function */
+#ifdef H5_USE_16_API_DEFAULT
+ if(H5Eset_auto(NULL, NULL)<0)
+#else
+ if(H5Eset_auto(H5E_DEFAULT, NULL, NULL)<0)
+#endif
TEST_ERROR;
/* Make H5Dwrite fail, verify default print is disabled */
@@ -117,9 +404,6 @@ test_error(hid_t file)
goto error;
}
- if(H5Eset_auto1(old_func, old_data)<0)
- TEST_ERROR;
-
/* In case program comes to this point, close dataset */
if(H5Dclose(dataset)<0) TEST_ERROR;
@@ -157,7 +441,7 @@ dump_error(void)
/* Customized way to print errors */
fprintf(stderr, "\n********* Print error stack in customized way *********\n");
- if(H5Ewalk1(H5E_WALK_UPWARD, custom_print_cb, stderr) < 0)
+ if(H5Ewalk1(H5E_WALK_UPWARD, custom_print_cb1, stderr) < 0)
TEST_ERROR;
return 0;
@@ -166,57 +450,6 @@ dump_error(void)
return -1;
}
-/*-------------------------------------------------------------------------
- * Function: custom_print_cb
- *
- * Purpose: Callback function to print error stack in customized way.
- *
- * Return: Success: 0
- *
- * Failure: -1
- *
- * Programmer: Raymond Lu
- * July 17, 2003
- *
- *
- * Modifications:
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-custom_print_cb(int n, H5E_error1_t *err_desc, void* client_data)
-{
- FILE *stream = (FILE *)client_data;
- char *maj = NULL;
- char *min = NULL;
- const int indent = 4;
-
- if(NULL == (min = H5Eget_minor(err_desc->min_num)))
- TEST_ERROR;
-
- if(NULL == (maj = H5Eget_major(err_desc->maj_num)))
- TEST_ERROR;
-
- fprintf(stream, "%*serror #%03d: %s in %s(): line %u\n",
- indent, "", n, err_desc->file_name,
- err_desc->func_name, err_desc->line);
-
- fprintf(stream, "%*smajor: %s\n", indent * 2, "", maj);
- fprintf(stream, "%*sminor: %s\n", indent * 2, "", min);
-
- HDfree(maj);
- HDfree(min);
-
- return 0;
-
-error:
- if(maj)
- HDfree(maj);
- if(min)
- HDfree(min);
-
- return -1;
-}
/*-------------------------------------------------------------------------
@@ -258,7 +491,9 @@ main(void)
H5Eclear1();
/* Test error API */
- if(test_error(file) < 0) {
+ if(test_error1() < 0) TEST_ERROR ;
+
+ if(test_error2(file) < 0) {
H5Epush1(__FILE__, FUNC_main, __LINE__, H5E_ERROR, H5E_BADMESG,
"Error test failed");
H5Eprint1(stderr);
@@ -275,4 +510,3 @@ main(void)
return 1;
}
#endif /* H5_NO_DEPRECATED_SYMBOLS */
-
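
The new user_print2()/custom_print_cb2() pair above walks the error stack with the new-style H5Ewalk2 API. A self-contained sketch of that walk; the file name is invented, and the strings returned by H5Eget_major/H5Eget_minor are freed by the caller as in the test code.

    #include <stdio.h>
    #include <stdlib.h>
    #include "hdf5.h"

    /* Sketch of a custom error walk: provoke an error, then print each stack
     * frame through a callback with the H5E_walk2_t signature. */
    static herr_t walk_cb(unsigned n, const H5E_error2_t *err, void *client_data)
    {
        FILE *stream = (FILE *)client_data;
        char *maj = H5Eget_major(err->maj_num);
        char *min = H5Eget_minor(err->min_num);

        fprintf(stream, "    error #%03u: %s in %s(): line %u\n",
                n, err->file_name, err->func_name, err->line);
        fprintf(stream, "        major: %s\n        minor: %s\n", maj, min);

        free(maj);
        free(min);
        return 0;
    }

    int main(void)
    {
        /* Provoke an error without letting the default handler print it */
        H5E_BEGIN_TRY {
            H5Fopen("no_such_file.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
        } H5E_END_TRY;

        /* Walk the default error stack from the API call outward */
        H5Ewalk2(H5E_DEFAULT, H5E_WALK_UPWARD, walk_cb, stderr);
        return 0;
    }
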
diff --git a/test/external.c b/test/external.c
index 7fd344e..3e1388c 100644
--- a/test/external.c
+++ b/test/external.c
@@ -879,7 +879,7 @@ test_4 (hid_t fapl)
if((fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0)
goto error;
- /* Open the external link */
+ /* Open the external link "/ link", which was created previously via H5Lcreate_external() */
if((xid = H5Gopen2(fid, "/ link", H5P_DEFAULT)) < 0)
goto error;
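
The clarified comment in test_4() refers to a root-level object named "/ link" that is really an external link made earlier with H5Lcreate_external(), so H5Gopen2() follows it into another file. A minimal sketch of that round trip; the file names here are invented, whereas the real test builds its names with h5_fixname().

    #include "hdf5.h"

    /* Sketch of creating and then following an external link. */
    int main(void)
    {
        hid_t tfid, tgid, sfid, xid;

        /* Target file containing a real group */
        tfid = H5Fcreate("ext_target.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        tgid = H5Gcreate2(tfid, "dst", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
        H5Gclose(tgid);
        H5Fclose(tfid);

        /* Source file holding an external link named " link" in its root group */
        sfid = H5Fcreate("ext_source.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        H5Lcreate_external("ext_target.h5", "dst", sfid, " link",
                           H5P_DEFAULT, H5P_DEFAULT);

        /* Opening "/ link" follows the link into ext_target.h5 */
        xid = H5Gopen2(sfid, "/ link", H5P_DEFAULT);

        H5Gclose(xid);
        H5Fclose(sfid);
        return 0;
    }
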
diff --git a/test/fheap.c b/test/fheap.c
index e38302c..3d8e69d 100644
--- a/test/fheap.c
+++ b/test/fheap.c
@@ -7621,14 +7621,14 @@ test_man_remove_root_direct(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "removing all objects from root direct block of absolute heap %s"; /* Test description */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Fill the heap up */
state.man_size = DBLOCK_SIZE(fh, 0);
@@ -7695,14 +7695,14 @@ test_man_remove_two_direct(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "removing all objects from two direct blocks of absolute heap %s"; /* Test description */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Fill the first block in heap */
state.man_size = DBLOCK_SIZE(fh, 0);
@@ -7784,14 +7784,14 @@ test_man_remove_first_row(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "removing all objects from first row of direct blocks of absolute heap %s"; /* Test description */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Fill first row of direct blocks */
if(fill_root_row(fh, dxpl, 0, fill_size, &state, &keep_ids))
@@ -7855,14 +7855,14 @@ test_man_remove_first_two_rows(hid_t fapl, H5HF_create_t *cparam, fheap_test_par
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "removing all objects from first two rows of direct blocks of absolute heap %s"; /* Test description */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Fill first two rows of direct blocks */
if(fill_root_row(fh, dxpl, 0, fill_size, &state, &keep_ids))
@@ -7928,14 +7928,14 @@ test_man_remove_first_four_rows(hid_t fapl, H5HF_create_t *cparam, fheap_test_pa
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "removing all objects from first four rows of direct blocks of absolute heap %s"; /* Test description */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Fill first two rows of direct blocks */
if(fill_root_row(fh, dxpl, 0, fill_size, &state, &keep_ids))
@@ -8005,14 +8005,14 @@ test_man_remove_all_root_direct(hid_t fapl, H5HF_create_t *cparam, fheap_test_pa
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "removing all objects from all direct blocks of root group in absolute heap %s"; /* Test description */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Fill direct blocks in root indirect block */
if(fill_root_direct(fh, dxpl, fill_size, &state, &keep_ids))
@@ -8076,14 +8076,14 @@ test_man_remove_2nd_indirect(hid_t fapl, H5HF_create_t *cparam, fheap_test_param
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "removing all objects from 2nd level indirect blocks of absolute heap %s"; /* Test description */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Fill direct blocks in root indirect block */
if(fill_root_direct(fh, dxpl, fill_size, &state, &keep_ids))
@@ -8151,14 +8151,14 @@ test_man_remove_3rd_indirect(hid_t fapl, H5HF_create_t *cparam, fheap_test_param
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "removing all objects from 3rd level indirect blocks of absolute heap %s"; /* Test description */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Fill direct blocks in root indirect block */
if(fill_root_direct(fh, dxpl, fill_size, &state, &keep_ids))
@@ -8235,14 +8235,14 @@ test_man_skip_start_block(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "inserting object that is too large for starting block, then remove all objects %s"; /* Test description */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
obj_size = (size_t)DBLOCK_SIZE(fh, 0) + 1;
state.man_size = cparam->managed.width * DBLOCK_SIZE(fh, 0);
state.man_size += cparam->managed.width * DBLOCK_SIZE(fh, 1);
@@ -8313,14 +8313,14 @@ test_man_skip_start_block_add_back(hid_t fapl, H5HF_create_t *cparam, fheap_test
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "skipping starting block, then adding object back to first block, then remove all objects %s"; /* Test description */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Insert object too large for starting block size */
obj_size = (size_t)DBLOCK_SIZE(fh, 0) + 1;
@@ -8413,14 +8413,14 @@ test_man_skip_start_block_add_skipped(hid_t fapl, H5HF_create_t *cparam, fheap_t
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "skipping starting block, then adding objects to backfill and extend, then remove all objects %s"; /* Test description */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Insert object too large for starting block size */
obj_size = (size_t)DBLOCK_SIZE(fh, 0) + 1;
@@ -8522,14 +8522,14 @@ test_man_skip_2nd_block(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *t
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "insert object to initial block, then add object too large for starting direct blocks, then remove all objects %s"; /* Test description */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Insert small object, to create root direct block */
state.man_size = DBLOCK_SIZE(fh, 0);
@@ -8621,14 +8621,14 @@ test_man_skip_2nd_block_add_skipped(hid_t fapl, H5HF_create_t *cparam, fheap_tes
const char *base_desc = "insert object to initial block, then add object too large for starting direct blocks, then backfill and extend, then remove all objects %s"; /* Test description */
unsigned v; /* Local index variables */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Insert small object, to create root direct block */
state.man_size = DBLOCK_SIZE(fh, 0);
@@ -8766,14 +8766,14 @@ test_man_fill_one_partial_skip_2nd_block_add_skipped(hid_t fapl, H5HF_create_t *
const char *base_desc = "skipping blocks with indirect root, then backfill and extend, then remove all objects %s"; /* Test description */
unsigned u; /* Local index variable */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Fill initial direct block */
state.man_size = DBLOCK_SIZE(fh, 0);
@@ -8931,14 +8931,14 @@ test_man_fill_row_skip_add_skipped(hid_t fapl, H5HF_create_t *cparam, fheap_test
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "filling first row, then skipping rows, then backfill and extend, then remove all objects %s"; /* Test description */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Fill first row of direct blocks */
if(fill_root_row(fh, dxpl, 0, fill_size, &state, &keep_ids))
@@ -9060,14 +9060,14 @@ test_man_skip_direct_skip_indirect_two_rows_add_skipped(hid_t fapl, H5HF_create_
const char *base_desc = "skipping direct blocks to last row and skipping two rows of root indirect block, then backfill and extend, then remove all objects %s"; /* Test description */
unsigned v; /* Local index variables */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Compute # direct block rows in root indirect block */
num_direct_rows = DTABLE_MAX_DROWS(fh);
@@ -9185,14 +9185,14 @@ test_man_fill_direct_skip_indirect_start_block_add_skipped(hid_t fapl, H5HF_crea
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "filling direct blocks and skipping blocks in non-root indirect block, then backfill and extend, then remove all objects %s"; /* Test description */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Fill direct blocks in root indirect block */
if(fill_root_direct(fh, dxpl, fill_size, &state, &keep_ids))
@@ -9312,14 +9312,14 @@ test_man_fill_direct_skip_2nd_indirect_start_block_add_skipped(hid_t fapl, H5HF_
const char *base_desc = "filling direct blocks and skipping row of non-root indirect blocks, then backfill and extend, then remove all objects %s"; /* Test description */
unsigned u; /* Local index variable */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Retrieve info about heap */
num_first_indirect_rows = IBLOCK_MAX_DROWS(fh, 1);
@@ -9440,14 +9440,14 @@ test_man_fill_2nd_direct_less_one_wrap_start_block_add_skipped(hid_t fapl, H5HF_
const char *base_desc = "filling direct blocks, filling 2nd level indirect blocks, except last one, and insert object too large for 2nd level indirect blocks, then backfill and extend, then remove all objects %s"; /* Test description */
unsigned u; /* Local index variables */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Retrieve info about heap */
num_first_indirect_rows = IBLOCK_MAX_DROWS(fh, 1);
@@ -9587,14 +9587,14 @@ test_man_fill_direct_skip_2nd_indirect_skip_2nd_block_add_skipped(hid_t fapl, H5
const char *base_desc = "filling direct blocks and skipping row of non-root indirect blocks, then skip row of direct blocks, then backfill and extend, then remove all objects %s"; /* Test description */
unsigned u; /* Local index variable */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Retrieve info about heap */
num_first_indirect_rows = IBLOCK_MAX_DROWS(fh, 1);
@@ -9757,14 +9757,14 @@ test_man_fill_direct_skip_indirect_two_rows_add_skipped(hid_t fapl, H5HF_create_
const char *base_desc = "filling direct blocks and skipping two rows of root indirect block, then backfill and extend, then remove all objects %s"; /* Test description */
unsigned u, v; /* Local index variables */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Retrieve info about heap */
num_first_indirect_rows = IBLOCK_MAX_DROWS(fh, 1);
@@ -9911,14 +9911,14 @@ test_man_fill_direct_skip_indirect_two_rows_skip_indirect_row_add_skipped(hid_t
const char *base_desc = "filling direct blocks and skipping two rows of root indirect block, skip one row of root indirect block, then backfill and extend, then remove all objects %s"; /* Test description */
unsigned u, v; /* Local index variables */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Retrieve info about heap */
num_first_indirect_rows = IBLOCK_MAX_DROWS(fh, 1);
@@ -10088,14 +10088,14 @@ test_man_fill_2nd_direct_skip_start_block_add_skipped(hid_t fapl, H5HF_create_t
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "filling direct blocks, filling 2nd level indirect blocks, and skip first rows of direct blocks of 3rd level indirect block, then backfill and extend, then remove all objects %s"; /* Test description */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Fill direct blocks in root indirect block */
if(fill_root_direct(fh, dxpl, fill_size, &state, &keep_ids))
@@ -10217,14 +10217,14 @@ test_man_fill_2nd_direct_skip_2nd_indirect_start_block_add_skipped(hid_t fapl, H
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "filling direct blocks, filling 2nd level indirect blocks, filling 3rd level indirect block's direct blocks, and skip first rows of direct blocks of 3rd level indirect block's 2nd level indirect block, then backfill and extend, then remove all objects %s"; /* Test description */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Fill direct blocks in root indirect block */
if(fill_root_direct(fh, dxpl, fill_size, &state, &keep_ids))
@@ -10357,14 +10357,14 @@ test_man_fill_2nd_direct_fill_direct_skip_3rd_indirect_start_block_add_skipped(h
const char *base_desc = "filling direct blocks, filling 2nd level indirect blocks, filling 3rd level indirect block's direct blocks, and skip first row of indirect blocks of 3rd level indirect block, then backfill and extend, then remove all objects %s"; /* Test description */
unsigned u, v; /* Local index variables */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Retrieve info about heap */
num_first_indirect_rows = IBLOCK_MAX_DROWS(fh, 1);
@@ -10509,14 +10509,14 @@ test_man_fill_2nd_direct_fill_direct_skip2_3rd_indirect_start_block_add_skipped(
const char *base_desc = "filling direct blocks, filling 2nd level indirect blocks, filling 3rd level indirect block's direct blocks, and skip first two rows of indirect blocks of 3rd level indirect block, then backfill and extend, then remove all objects %s"; /* Test description */
unsigned u, v; /* Local index variables */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Retrieve info about heap */
num_first_indirect_rows = IBLOCK_MAX_DROWS(fh, 1);
@@ -10672,14 +10672,14 @@ test_man_fill_3rd_direct_less_one_fill_direct_wrap_start_block_add_skipped(hid_t
const char *base_desc = "filling direct blocks, filling 2nd level indirect blocks, filling first row of 3rd level indirect blocks, except last one, fill all direct blocks in last 3rd level indirect block, and insert object too large for it's 2nd level indirect blocks, then backfill and extend, then remove all objects %s"; /* Test description */
unsigned u, v; /* Local index variables */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Retrieve info about heap */
num_first_indirect_rows = IBLOCK_MAX_DROWS(fh, 1);
@@ -10835,14 +10835,14 @@ test_man_fill_1st_row_3rd_direct_fill_2nd_direct_less_one_wrap_start_block_add_s
const char *base_desc = "filling direct blocks, filling 2nd level indirect blocks, filling first row of 3rd level indirect blocks, fill all direct blocks in next 3rd level indirect block, fill all 1st row of 2nd level indirect blocks, except last one, and insert object too large for 2nd level indirect block, then backfill and extend, then remove all objects %s"; /* Test description */
unsigned u; /* Local index variables */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Retrieve info about heap */
num_first_indirect_rows = IBLOCK_MAX_DROWS(fh, 1);
@@ -11002,14 +11002,14 @@ test_man_fill_3rd_direct_fill_direct_skip_start_block_add_skipped(hid_t fapl, H5
const char *base_desc = "filling direct blocks, filling 2nd level indirect blocks, filling 3rd level indirect blocks, fill 4th level indirect block's direct blocks, and skip first row of 2nd indirect blocks of 4th level indirect block, then backfill and extend, then remove all objects %s"; /* Test description */
unsigned u, v; /* Local index variables */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Retrieve info about heap */
num_first_indirect_rows = IBLOCK_MAX_DROWS(fh, 1);
@@ -11164,14 +11164,14 @@ test_man_fill_3rd_direct_fill_2nd_direct_fill_direct_skip_3rd_indirect_start_blo
const char *base_desc = "filling direct blocks, filling 2nd level indirect blocks, filling 3rd level indirect blocks, fill 4th level indirect block's direct, 2nd level indirect blocks and 3rd level direct block, and skip first row of 2nd indirect blocks of 4th level indirect block's 3rd level indirect block, then backfill and extend, then remove all objects %s"; /* Test description */
unsigned u, v; /* Local index variables */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Retrieve info about heap */
num_first_indirect_rows = IBLOCK_MAX_DROWS(fh, 1);
@@ -11344,14 +11344,14 @@ test_man_fill_3rd_direct_fill_2nd_direct_fill_direct_skip_3rd_indirect_two_rows_
const char *base_desc = "filling direct blocks, filling 2nd level indirect blocks, filling 3rd level indirect blocks, fill first row of 4th level indirect blocks, fill 2nd row 4th level indirect block's direct, 2nd level indirect blocks, first row of 3rd level indirect blocks, 3rd level direct block in 2nd row, and skip first row of 2nd indirect blocks of 4th level indirect block's 3rd level indirect block, then backfill and extend, then remove all objects %s"; /* Test description */
unsigned u, v; /* Local index variables */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Retrieve info about heap */
num_first_indirect_rows = IBLOCK_MAX_DROWS(fh, 1);
@@ -11559,14 +11559,14 @@ test_man_fill_3rd_direct_fill_2nd_direct_fill_direct_skip_3rd_indirect_wrap_star
const char *base_desc = "filling direct blocks, filling 2nd level indirect blocks, filling 3rd level indirect blocks, fill first row of 3rd level indirect blocks in 4th level indirect block except last 3rd level block, fill direct blocks in 3rd level block, and skip row of 2nd indirect blocks of 4th level indirect block's 3rd level indirect block, then backfill and extend, then remove all objects %s"; /* Test description */
unsigned u, v; /* Local index variables */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Retrieve info about heap */
num_first_indirect_rows = IBLOCK_MAX_DROWS(fh, 1);
@@ -11756,14 +11756,14 @@ test_man_fill_4th_direct_less_one_fill_2nd_direct_fill_direct_skip_3rd_indirect_
const char *base_desc = "filling direct blocks, filling 2nd level indirect blocks, filling 3rd level indirect blocks, fill first row of 4th level indirect blocks, except last one, fill first row of 3rd level indirect blocks in last 4th level indirect block except last 3rd level block, fill direct blocks in 3rd level block, and skip row of 2nd indirect blocks of 4th level indirect block's 3rd level indirect block, then backfill and extend, then remove all objects %s"; /* Test description */
unsigned u, v; /* Local index variables */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Retrieve info about heap */
num_first_indirect_rows = IBLOCK_MAX_DROWS(fh, 1);
@@ -11982,14 +11982,14 @@ test_man_frag_simple(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
const char *base_desc = "fragmenting small blocks, then backfill and extend, then remove all objects %s"; /* Test description */
unsigned u; /* Local index variables */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Insert objects small enough to fit into initial blocks, but not to
* share them with other objects of the same size, until the next larger
@@ -12115,14 +12115,14 @@ test_man_frag_direct(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
const char *base_desc = "fragmenting direct blocks, then backfill and extend, then remove all objects %s"; /* Test description */
unsigned u, v; /* Local index variables */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Retrieve # of direct rows in root indirect block */
root_direct_rows = H5HF_get_dtable_max_drows_test(fh);
@@ -12288,14 +12288,14 @@ test_man_frag_2nd_direct(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *
const char *base_desc = "fill root direct blocks, then fragment 2nd level indirect block's direct blocks, then backfill and extend, then remove all objects %s"; /* Test description */
unsigned u, v; /* Local index variables */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Compute # of bits used in first row */
num_first_indirect_rows = IBLOCK_MAX_DROWS(fh, 1);
@@ -12402,14 +12402,14 @@ test_man_frag_3rd_direct(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *
const char *base_desc = "fill root direct blocks and 2nd level indirect blocks, then fragment 3rd level indirect block's direct blocks, then backfill and extend, then remove all objects %s"; /* Test description */
unsigned u, v; /* Local index variables */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Compute # of direct rows in root indirect block */
root_direct_rows = DTABLE_MAX_DROWS(fh);
@@ -12519,14 +12519,14 @@ test_huge_insert_one(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "insert one huge object, then remove %s"; /* Test description */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Allocate heap ID(s) */
if(NULL == (heap_id = (unsigned char *)H5MM_malloc(tparam->actual_id_len)))
TEST_ERROR
@@ -12673,14 +12673,14 @@ test_huge_insert_two(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "insert two huge objects, then remove %s"; /* Test description */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Allocate heap ID(s) */
if(NULL == (heap_id = (unsigned char *)H5MM_malloc(tparam->actual_id_len)))
TEST_ERROR
@@ -12907,14 +12907,14 @@ test_huge_insert_three(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tp
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "insert three huge objects, then remove %s"; /* Test description */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Allocate heap ID(s) */
if(NULL == (heap_id = (unsigned char *)H5MM_malloc(tparam->actual_id_len)))
TEST_ERROR
@@ -13217,14 +13217,14 @@ test_huge_insert_mix(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "insert mix of normal & huge objects, then remove %s"; /* Test description */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Allocate heap ID(s) */
if(NULL == (heap_id = (unsigned char *)H5MM_malloc(tparam->actual_id_len)))
TEST_ERROR
@@ -13855,14 +13855,14 @@ test_tiny_insert_one(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "insert one tiny object, then remove %s"; /* Test description */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Allocate heap ID(s) */
if(NULL == (heap_id = (unsigned char *)H5MM_malloc(tparam->actual_id_len)))
TEST_ERROR
@@ -14009,14 +14009,14 @@ test_tiny_insert_two(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "insert two tiny objects, then remove %s"; /* Test description */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Allocate heap ID(s) */
if(NULL == (heap_id = (unsigned char *)H5MM_malloc(tparam->actual_id_len)))
TEST_ERROR
@@ -14248,14 +14248,14 @@ test_tiny_insert_mix(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "insert mix of normal, huge & tiny objects, then remove %s"; /* Test description */
- /* Perform common file & heap open operations */
- if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
- TEST_ERROR
-
/* Perform common test initialization operations */
if(begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
TEST_ERROR
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
/* Allocate heap ID(s) */
if(NULL == (heap_id = (unsigned char *)H5MM_malloc(tparam->actual_id_len)))
TEST_ERROR
@@ -15365,6 +15365,9 @@ test_random(hsize_t size_limit, hid_t fapl, H5HF_create_t *cparam, fheap_test_pa
fheap_heap_state_t state; /* State of fractal heap */
size_t u; /* Local index variable */
+ /* Initialize the heap ID structure */
+ HDmemset(&keep_ids, 0, sizeof(fheap_heap_ids_t));
+
/* Copy heap creation properties */
HDmemcpy(&tmp_cparam, cparam, sizeof(H5HF_create_t));
@@ -15404,9 +15407,6 @@ test_random(hsize_t size_limit, hid_t fapl, H5HF_create_t *cparam, fheap_test_pa
TESTING("inserting random-sized objects, then remove all objects (all - random)")
} /* end else */
- /* Initialize the heap ID structure */
- HDmemset(&keep_ids, 0, sizeof(fheap_heap_ids_t));
-
/* Choose random # seed */
seed = (unsigned long)HDtime(NULL);
#ifdef QAK
@@ -15569,6 +15569,9 @@ test_random_pow2(hsize_t size_limit, hid_t fapl, H5HF_create_t *cparam, fheap_te
fheap_heap_state_t state; /* State of fractal heap */
size_t u; /* Local index variable */
+ /* Initialize the heap ID structure */
+ HDmemset(&keep_ids, 0, sizeof(fheap_heap_ids_t));
+
/* Copy heap creation properties */
HDmemcpy(&tmp_cparam, cparam, sizeof(H5HF_create_t));
@@ -15608,9 +15611,6 @@ test_random_pow2(hsize_t size_limit, hid_t fapl, H5HF_create_t *cparam, fheap_te
TESTING("inserting random-sized objects with power of 2 distribution, then remove all objects (all - random)")
} /* end else */
- /* Initialize the heap ID structure */
- HDmemset(&keep_ids, 0, sizeof(fheap_heap_ids_t));
-
/* Choose random # seed */
seed = (unsigned long)HDtime(NULL);
#ifdef QAK
@@ -15805,6 +15805,9 @@ test_write(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tparam)
else
TESTING("writing objects in heap")
+ /* Initialize the heap ID structure */
+ HDmemset(&keep_ids, 0, sizeof(fheap_heap_ids_t));
+
/* Copy heap creation properties */
HDmemcpy(&tmp_cparam, cparam, sizeof(H5HF_create_t));
@@ -15843,9 +15846,6 @@ test_write(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tparam)
if(id_len > MAX_HEAP_ID_LEN)
TEST_ERROR
- /* Initialize the heap ID structure */
- HDmemset(&keep_ids, 0, sizeof(fheap_heap_ids_t));
-
/* Create 'tiny' and 'huge' objects */
obj_size = id_len / 2;
@@ -16072,6 +16072,9 @@ test_bug1(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tparam)
*/
TESTING("bug1: inserting several objects & removing one, then re-inserting")
+ /* Initialize the heap ID structure */
+ HDmemset(&keep_ids, 0, sizeof(fheap_heap_ids_t));
+
/* Perform common file & heap open operations */
if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
TEST_ERROR
@@ -16082,9 +16085,6 @@ test_bug1(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tparam)
if(id_len > MAX_HEAP_ID_LEN)
TEST_ERROR
- /* Initialize the heap ID structure */
- HDmemset(&keep_ids, 0, sizeof(fheap_heap_ids_t));
-
/* Insert objects */
obj_size = 44;
obj_loc = 1;
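
The fheap.c hunks above make two mechanical reorderings: every test now calls begin_test() before open_heap(), and the tests that do not use begin_test() (test_random, test_random_pow2, test_write, test_bug1) zero their keep_ids structure before any statement that can jump to the error label via TEST_ERROR. A minimal sketch of the resulting shape, with the helper signatures taken from the diff and the surrounding local variable types assumed:

/* Sketch only: helper calls copied from the diff, local types assumed */
/* 1) Run the common initialization (test banner, keep_ids/fill_size setup) first ... */
if(begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
    TEST_ERROR

/* 2) ... and only then open the file and fractal heap */
if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh,
        &fh_addr, &state, &empty_size) < 0)
    TEST_ERROR

/* Tests without begin_test() instead zero keep_ids up front, so the
 * error path can always release it safely: */
HDmemset(&keep_ids, 0, sizeof(fheap_heap_ids_t));
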
diff --git a/test/filter_fail.c b/test/filter_fail.c
index abb25cd..9b46421 100644
--- a/test/filter_fail.c
+++ b/test/filter_fail.c
@@ -25,13 +25,13 @@
#include "H5srcdir.h"
#define DSET_NAME "dset_fail"
-#define ONE_MB 1048576
#define H5Z_FILTER_FAIL_TEST 312
#define DIM 10
#define FILTER_CHUNK_DIM 2
const char *FILENAME[] = {
- "filter_fail",
+ "filter_fail_with_cache",
+ "filter_fail_without_cache",
NULL
};
@@ -70,9 +70,6 @@ filter_fail(unsigned int flags, size_t cd_nelmts,
size_t *buf_size, void **buf)
{
int *dst = (int*)(*buf);
- unsigned int offset;
- unsigned int length;
- unsigned int value;
size_t ret_value = 0;
if(flags & H5Z_FLAG_REVERSE) { /* do nothing during read */
@@ -89,7 +86,6 @@ filter_fail(unsigned int flags, size_t cd_nelmts,
}
} /* end else */
-error:
return ret_value;
} /* end filter_fail() */
@@ -112,27 +108,30 @@ error:
* 25 August 2010
*
* Modifications:
- *
+ * Raymond Lu
+ * 5 Oct 2010
+ * Test when the chunk cache is enabled and disabled to make
+ * sure the library behaves properly.
*-------------------------------------------------------------------------
*/
static herr_t
-test_filter_write(char *file_name, hid_t my_fapl)
+test_filter_write(char *file_name, hid_t my_fapl, hbool_t cache_enabled)
{
- char filename[1024];
hid_t file = -1;
hid_t dataset=-1; /* dataset ID */
hid_t sid=-1; /* dataspace ID */
hid_t dcpl=-1; /* dataset creation property list ID */
hsize_t dims[1]={DIM}; /* dataspace dimension - 10*/
hsize_t chunk_dims[1]={FILTER_CHUNK_DIM}; /* chunk dimension - 2*/
- int nfilters; /* number of filters in DCPL */
- unsigned flags; /* flags for filter */
int points[DIM]; /* Data */
- int rbuf[DIM]; /* Data */
herr_t ret; /* generic return value */
int i;
- TESTING("data writing when a mandatory filter fails");
+ if(cache_enabled) {
+ TESTING("data writing when a mandatory filter fails and chunk cache is enabled");
+ } else {
+ TESTING("data writing when a mandatory filter fails and chunk cache is disabled");
+ }
/* Create file */
if((file = H5Fcreate(file_name, H5F_ACC_TRUNC, H5P_DEFAULT, my_fapl)) < 0) TEST_ERROR
@@ -161,24 +160,44 @@ test_filter_write(char *file_name, hid_t my_fapl)
for(i = 0; i < DIM; i++)
points[i] = i;
- /* Write data */
- if(H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, sid, H5P_DEFAULT, points) < 0) TEST_ERROR
+ /* Write data. If the chunk cache is enabled, H5Dwrite should succeed. If it is
+ * disabled, H5Dwrite should fail. */
+ if(cache_enabled) {
+ if(H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, sid, H5P_DEFAULT, points) < 0)
+ TEST_ERROR
+ } else {
+ /* Data writing should fail */
+ H5E_BEGIN_TRY {
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, sid, H5P_DEFAULT, points);
+ } H5E_END_TRY;
+ if(ret >= 0) {
+ H5_FAILED();
+ puts(" Data writing is supposed to fail because the chunk can't be written to file.");
+ TEST_ERROR
+ }
+ }
/* clean up objects used for this test */
if(H5Pclose (dcpl) < 0) TEST_ERROR
if(H5Sclose (sid) < 0) TEST_ERROR
- /* Dataset closing should fail */
- H5E_BEGIN_TRY {
- ret = H5Dclose (dataset);
- } H5E_END_TRY;
- if(ret >= 0) {
- H5_FAILED();
- puts(" Dataset is supposed to fail because the chunk can't be flushed to file.");
- TEST_ERROR
+ /* Close dataset. If the chunk cache is enabled, the flushing of chunks should fail
+ * during H5Dclose. If it is disabled, H5Dwrite should fail but H5Dclose should succeed. */
+ if(cache_enabled) {
+ H5E_BEGIN_TRY {
+ ret = H5Dclose (dataset);
+ } H5E_END_TRY;
+ if(ret >= 0) {
+ H5_FAILED();
+ puts(" Dataset is supposed to fail because the chunk can't be flushed to file.");
+ TEST_ERROR
+ }
+ } else {
+ if(H5Dclose (dataset) < 0)
+ TEST_ERROR
}
- /* Even though H5Dclose fails, it should release all resources.
+ /* Even though H5Dclose or H5Dwrite fails, it should release all resources.
* So the file should close successfully. */
if(H5Fclose (file) < 0) TEST_ERROR
@@ -222,7 +241,6 @@ test_filter_read(char *file_name, hid_t my_fapl)
hid_t sid = -1;
hid_t mspace = -1;
hsize_t dims[1]={DIM}; /* dataspace dimension - 10*/
- hsize_t chunk_dims[1]={FILTER_CHUNK_DIM}; /* chunk dimension - 2*/
int rbuf[DIM]; /* Data */
hsize_t dset_size = 0; /* Dataset storage size */
hsize_t hs_offset[H5S_MAX_RANK];
@@ -346,9 +364,9 @@ int main(void)
{
hid_t fapl;
int mdc_nelmts = 0;
- size_t rdcc_nelmts = 521;
- size_t rdcc_nbytes = ONE_MB;
- double rdcc_w0 = 0.75;
+ size_t rdcc_nelmts = 0;
+ size_t rdcc_nbytes = 0;
+ double rdcc_w0 = 0;
char filename[1024];
unsigned nerrors = 0;
@@ -357,12 +375,21 @@ int main(void)
h5_fixname(FILENAME[0], fapl, filename, sizeof filename);
- /* Make sure the chunk cache is used. All values are default. */
+ /* The chunk cache is used so that the flushing of data chunks happens
+ * during H5Dclose. All values are default. */
+ nerrors += (test_filter_write(filename, fapl, TRUE) < 0 ? 1 : 0);
+ nerrors += (test_filter_read(filename, fapl) < 0 ? 1 : 0);
+
+ h5_fixname(FILENAME[1], fapl, filename, sizeof filename);
+
+ /* Disable the chunk cache so that the writing of data chunks happens
+ * during H5Dwrite. */
if(H5Pset_cache(fapl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0) < 0)
TEST_ERROR
- nerrors += (test_filter_write(filename, fapl) < 0 ? 1 : 0);
- nerrors += (test_filter_read(filename, fapl) < 0 ? 1 : 0);
+ /* Run the test again. */
+ nerrors += (test_filter_write(filename, fapl, FALSE) < 0 ? 1 : 0);
+ nerrors += (test_filter_read(filename, fapl) < 0 ? 1 : 0);
h5_cleanup(FILENAME, fapl);
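
Taken together, the filter_fail.c changes run the write test twice: with the default raw-data chunk cache, the failing mandatory filter is only invoked when cached chunks are flushed, so the error surfaces at H5Dclose; with the cache disabled via H5Pset_cache, each chunk is pushed through the filter during H5Dwrite and the write itself fails. A condensed sketch of the driver logic from the diff (error handling trimmed):

hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

/* Default chunk cache: the failure is deferred until H5Dclose flushes chunks */
nerrors += (test_filter_write(filename, fapl, TRUE) < 0 ? 1 : 0);

/* Disable the chunk cache (0 slots, 0 bytes): now H5Dwrite itself fails */
if(H5Pset_cache(fapl, 0, (size_t)0, (size_t)0, 0.0) < 0)
    TEST_ERROR
nerrors += (test_filter_write(filename, fapl, FALSE) < 0 ? 1 : 0);
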
diff --git a/test/gen_cross.c b/test/gen_cross.c
index 46afc35..32be867 100755
--- a/test/gen_cross.c
+++ b/test/gen_cross.c
@@ -27,19 +27,56 @@
#include "h5test.h"
#define H5FILE_NAME "data.h5"
-#define DATASETNAME "Array"
-#define NX 5 /* dataset dimensions */
-#define NY 6
-#define RANK 2
+#define DATASETNAME "Array"
+#define DATASETNAME2 "Scale_offset_float_data_le"
+#define DATASETNAME3 "Scale_offset_float_data_be"
+#define DATASETNAME4 "Scale_offset_double_data_le"
+#define DATASETNAME5 "Scale_offset_double_data_be"
+#define DATASETNAME6 "Scale_offset_char_data_le"
+#define DATASETNAME7 "Scale_offset_char_data_be"
+#define DATASETNAME8 "Scale_offset_short_data_le"
+#define DATASETNAME9 "Scale_offset_short_data_be"
+#define DATASETNAME10 "Scale_offset_int_data_le"
+#define DATASETNAME11 "Scale_offset_int_data_be"
+#define DATASETNAME12 "Scale_offset_long_long_data_le"
+#define DATASETNAME13 "Scale_offset_long_long_data_be"
+#define NX 6
+#define NY 6
+#define RANK 2
+#define CHUNK0 4
+#define CHUNK1 3
+int create_normal_dset(hid_t fid, hid_t fsid, hid_t msid);
+int create_scale_offset_dsets_float(hid_t fid, hid_t fsid, hid_t msid);
+int create_scale_offset_dsets_double(hid_t fid, hid_t fsid, hid_t msid);
+int create_scale_offset_dsets_char(hid_t fid, hid_t fsid, hid_t msid);
+int create_scale_offset_dsets_short(hid_t fid, hid_t fsid, hid_t msid);
+int create_scale_offset_dsets_int(hid_t fid, hid_t fsid, hid_t msid);
+int create_scale_offset_dsets_long_long(hid_t fid, hid_t fsid, hid_t msid);
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_normal_dset
+ *
+ * Purpose: Create a regular dataset of DOUBLE datatype.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Raymond Lu
+ * Some time ago
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
int
-main (void)
+create_normal_dset(hid_t fid, hid_t fsid, hid_t msid)
{
- hid_t file, dataset; /* file and dataset handles */
- hid_t datatype, dataspace; /* handles */
- hsize_t dimsf[2]; /* dataset dimensions */
- herr_t status;
+ hid_t dataset; /* file and dataset handles */
+ hid_t dcpl;
float data[NX][NY]; /* data to write */
+ float fillvalue = -2.2;
int i, j;
/*
@@ -55,49 +92,776 @@ main (void)
* 2 3 4 5 6 7
* 3 4 5 6 7 8
* 4 5 6 7 8 9
+ * 5 6 7 8 9 10
+ * -2.2 -2.2 -2.2 -2.2 -2.2 -2.2
*/
/*
- * Create a new file using H5F_ACC_TRUNC access,
- * default file creation properties, and default file
- * access properties.
+ * Create the dataset creation property list, set the fill value.
*/
- file = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR
+ if(H5Pset_fill_value(dcpl, H5T_NATIVE_FLOAT, &fillvalue) < 0)
+ TEST_ERROR
/*
- * Describe the size of the array and create the data space for fixed
- * size dataset.
+ * Create a new dataset within the file using defined dataspace and
+ * datatype and default dataset creation properties.
*/
- dimsf[0] = NX;
- dimsf[1] = NY;
- dataspace = H5Screate_simple(RANK, dimsf, NULL);
+ if((dataset = H5Dcreate2(fid, DATASETNAME, H5T_NATIVE_DOUBLE, fsid,
+ H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ TEST_ERROR
/*
- * Define datatype for the data in the file.
- * We will store little endian INT numbers.
+ * Write the data to the dataset using default transfer properties.
*/
- datatype = H5Tcopy(H5T_NATIVE_DOUBLE);
+ if(H5Dwrite(dataset, H5T_NATIVE_FLOAT, msid, fsid, H5P_DEFAULT, data) < 0)
+ TEST_ERROR
/*
- * Create a new dataset within the file using defined dataspace and
- * datatype and default dataset creation properties.
+ * Close/release resources.
+ */
+ if(H5Pclose(dcpl) < 0)
+ TEST_ERROR
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dcpl);
+ H5Dclose(dataset);
+ } H5E_END_TRY;
+
+ return -1;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_scale_offset_dsets_float
+ *
+ * Purpose: Create a dataset of FLOAT datatype with scale-offset filter
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Neil Fortner
+ * 27 January 2011
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+create_scale_offset_dsets_float(hid_t fid, hid_t fsid, hid_t msid)
+{
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+ hid_t dataset; /* dataset handles */
+ hid_t dcpl;
+ float data[NX][NY]; /* data to write */
+ float fillvalue = -2.2;
+ hsize_t chunk[RANK] = {CHUNK0, CHUNK1};
+ int i, j;
+
+ /*
+ * Data and output buffer initialization.
+ */
+ for (j = 0; j < NX; j++) {
+ for (i = 0; i < NY; i++)
+ data[j][i] = ((float)(i + j + 1))/3;
+ }
+
+ /*
+ * Create the dataset creation property list, add the Scale-Offset
+ * filter, set the chunk size, and set the fill value.
+ */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR
+ if(H5Pset_scaleoffset(dcpl, H5Z_SO_FLOAT_DSCALE, 3) < 0)
+ TEST_ERROR
+ if(H5Pset_chunk(dcpl, RANK, chunk) < 0)
+ TEST_ERROR
+ if(H5Pset_fill_value(dcpl, H5T_NATIVE_FLOAT, &fillvalue) < 0)
+ TEST_ERROR
+
+ /*
+ * Create a new dataset within the file using defined dataspace, little
+ * endian datatype and default dataset creation properties.
+ */
+ if((dataset = H5Dcreate2(fid, DATASETNAME2, H5T_IEEE_F32LE, fsid,
+ H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /*
+ * Write the data to the dataset using default transfer properties.
+ */
+ if(H5Dwrite(dataset, H5T_NATIVE_FLOAT, msid, fsid, H5P_DEFAULT, data) < 0)
+ TEST_ERROR
+
+ /* Close dataset */
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /* Now create a dataset with a big-endian type */
+ if((dataset = H5Dcreate2(fid, DATASETNAME3, H5T_IEEE_F32BE, fsid,
+ H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+ if(H5Dwrite(dataset, H5T_NATIVE_FLOAT, msid, fsid, H5P_DEFAULT, data) < 0)
+ TEST_ERROR
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /*
+ * Close/release resources.
+ */
+ if(H5Pclose(dcpl) < 0)
+ TEST_ERROR
+
+#else /* H5_HAVE_FILTER_SCALEOFFSET */
+ const char *not_supported= "Scaleoffset filter is not enabled. Can't create the dataset.";
+
+ puts(not_supported);
+#endif /* H5_HAVE_FILTER_SCALEOFFSET */
+
+ return 0;
+
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dcpl);
+ H5Dclose(dataset);
+ } H5E_END_TRY;
+
+ return -1;
+#endif /* H5_HAVE_FILTER_SCALEOFFSET */
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_scale_offset_dsets_double
+ *
+ * Purpose: Create a dataset of DOUBLE datatype with scale-offset filter
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Raymond Lu
+ * 21 January 2011
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+create_scale_offset_dsets_double(hid_t fid, hid_t fsid, hid_t msid)
+{
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+ hid_t dataset; /* dataset handles */
+ hid_t dcpl;
+ double data[NX][NY]; /* data to write */
+ double fillvalue = -2.2;
+ hsize_t chunk[RANK] = {CHUNK0, CHUNK1};
+ int i, j;
+
+ /*
+ * Data and output buffer initialization.
+ */
+ for (j = 0; j < NX; j++) {
+ for (i = 0; i < NY; i++)
+ data[j][i] = ((double)(i + j + 1))/3;
+ }
+
+ /*
+ * Create the dataset creation property list, add the Scale-Offset
+ * filter, set the chunk size, and set the fill value.
+ */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR
+ if(H5Pset_scaleoffset(dcpl, H5Z_SO_FLOAT_DSCALE, 3) < 0)
+ TEST_ERROR
+ if(H5Pset_chunk(dcpl, RANK, chunk) < 0)
+ TEST_ERROR
+ if(H5Pset_fill_value(dcpl, H5T_NATIVE_DOUBLE, &fillvalue) < 0)
+ TEST_ERROR
+
+ /*
+ * Create a new dataset within the file using defined dataspace, little
+ * endian datatype and default dataset creation properties.
+ */
+ if((dataset = H5Dcreate2(fid, DATASETNAME4, H5T_IEEE_F64LE, fsid,
+ H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /*
+ * Write the data to the dataset using default transfer properties.
+ */
+ if(H5Dwrite(dataset, H5T_NATIVE_DOUBLE, msid, fsid, H5P_DEFAULT, data) < 0)
+ TEST_ERROR
+
+ /* Close dataset */
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /* Now create a dataset with a big-endian type */
+ if((dataset = H5Dcreate2(fid, DATASETNAME5, H5T_IEEE_F64BE, fsid,
+ H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+ if(H5Dwrite(dataset, H5T_NATIVE_DOUBLE, msid, fsid, H5P_DEFAULT, data) < 0)
+ TEST_ERROR
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /*
+ * Close/release resources.
+ */
+ if(H5Pclose(dcpl) < 0)
+ TEST_ERROR
+
+#else /* H5_HAVE_FILTER_SCALEOFFSET */
+ const char *not_supported= "Scaleoffset filter is not enabled. Can't create the dataset.";
+
+ puts(not_supported);
+#endif /* H5_HAVE_FILTER_SCALEOFFSET */
+
+ return 0;
+
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dcpl);
+ H5Dclose(dataset);
+ } H5E_END_TRY;
+
+ return -1;
+#endif /* H5_HAVE_FILTER_SCALEOFFSET */
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_scale_offset_dsets_char
+ *
+ * Purpose: Create a dataset of CHAR datatype with scale-offset filter
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Neil Fortner
+ * 27 January 2011
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+create_scale_offset_dsets_char(hid_t fid, hid_t fsid, hid_t msid)
+{
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+ hid_t dataset; /* dataset handles */
+ hid_t dcpl;
+ char data[NX][NY]; /* data to write */
+ char fillvalue = -2;
+ hsize_t chunk[RANK] = {CHUNK0, CHUNK1};
+ int i, j;
+
+ /*
+ * Data and output buffer initialization.
+ */
+ for (j = 0; j < NX; j++) {
+ for (i = 0; i < NY; i++)
+ data[j][i] = i + j;
+ }
+ /*
+ * 0 1 2 3 4 5
+ * 1 2 3 4 5 6
+ * 2 3 4 5 6 7
+ * 3 4 5 6 7 8
+ * 4 5 6 7 8 9
+ * 5 6 7 8 9 10
+ */
+
+ /*
+ * Create the dataset creation property list, add the Scale-Offset
+ * filter, set the chunk size, and set the fill value.
+ */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR
+ if(H5Pset_scaleoffset(dcpl, H5Z_SO_INT, H5Z_SO_INT_MINBITS_DEFAULT) < 0)
+ TEST_ERROR
+ if(H5Pset_chunk(dcpl, RANK, chunk) < 0)
+ TEST_ERROR
+ if(H5Pset_fill_value(dcpl, H5T_NATIVE_CHAR, &fillvalue) < 0)
+ TEST_ERROR
+
+ /*
+ * Create a new dataset within the file using defined dataspace, little
+ * endian datatype and default dataset creation properties.
+ */
+ if((dataset = H5Dcreate2(fid, DATASETNAME6, H5T_STD_I8LE, fsid, H5P_DEFAULT,
+ dcpl, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /*
+ * Write the data to the dataset using default transfer properties.
+ */
+ if(H5Dwrite(dataset, H5T_NATIVE_CHAR, msid, fsid, H5P_DEFAULT, data) < 0)
+ TEST_ERROR
+
+ /* Close dataset */
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /* Now create a dataset with a big-endian type */
+ if((dataset = H5Dcreate2(fid, DATASETNAME7, H5T_STD_I8BE, fsid, H5P_DEFAULT,
+ dcpl, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+ if(H5Dwrite(dataset, H5T_NATIVE_CHAR, msid, fsid, H5P_DEFAULT, data) < 0)
+ TEST_ERROR
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /*
+ * Close/release resources.
+ */
+ if(H5Pclose(dcpl) < 0)
+ TEST_ERROR
+
+#else /* H5_HAVE_FILTER_SCALEOFFSET */
+ const char *not_supported= "Scaleoffset filter is not enabled. Can't create the dataset.";
+
+ puts(not_supported);
+#endif /* H5_HAVE_FILTER_SCALEOFFSET */
+
+ return 0;
+
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dcpl);
+ H5Dclose(dataset);
+ } H5E_END_TRY;
+
+ return -1;
+#endif /* H5_HAVE_FILTER_SCALEOFFSET */
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_scale_offset_dsets_short
+ *
+ * Purpose: Create a dataset of SHORT datatype with scale-offset filter
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Neil Fortner
+ * 27 January 2011
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+create_scale_offset_dsets_short(hid_t fid, hid_t fsid, hid_t msid)
+{
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+ hid_t dataset; /* dataset handles */
+ hid_t dcpl;
+ short data[NX][NY]; /* data to write */
+ short fillvalue = -2;
+ hsize_t chunk[RANK] = {CHUNK0, CHUNK1};
+ int i, j;
+
+ /*
+ * Data and output buffer initialization.
+ */
+ for (j = 0; j < NX; j++) {
+ for (i = 0; i < NY; i++)
+ data[j][i] = i + j;
+ }
+ /*
+ * 0 1 2 3 4 5
+ * 1 2 3 4 5 6
+ * 2 3 4 5 6 7
+ * 3 4 5 6 7 8
+ * 4 5 6 7 8 9
+ * 5 6 7 8 9 10
+ */
+
+ /*
+ * Create the dataset creation property list, add the Scale-Offset
+ * filter, set the chunk size, and set the fill value.
+ */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR
+ if(H5Pset_scaleoffset(dcpl, H5Z_SO_INT, H5Z_SO_INT_MINBITS_DEFAULT) < 0)
+ TEST_ERROR
+ if(H5Pset_chunk(dcpl, RANK, chunk) < 0)
+ TEST_ERROR
+ if(H5Pset_fill_value(dcpl, H5T_NATIVE_SHORT, &fillvalue) < 0)
+ TEST_ERROR
+
+ /*
+ * Create a new dataset within the file using defined dataspace, little
+ * endian datatype and default dataset creation properties.
+ */
+ if((dataset = H5Dcreate2(fid, DATASETNAME8, H5T_STD_I16LE, fsid,
+ H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /*
+ * Write the data to the dataset using default transfer properties.
+ */
+ if(H5Dwrite(dataset, H5T_NATIVE_SHORT, msid, fsid, H5P_DEFAULT, data) < 0)
+ TEST_ERROR
+
+ /* Close dataset */
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /* Now create a dataset with a big-endian type */
+ if((dataset = H5Dcreate2(fid, DATASETNAME9, H5T_STD_I16BE, fsid,
+ H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+ if(H5Dwrite(dataset, H5T_NATIVE_SHORT, msid, fsid, H5P_DEFAULT, data) < 0)
+ TEST_ERROR
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /*
+ * Close/release resources.
+ */
+ if(H5Pclose(dcpl) < 0)
+ TEST_ERROR
+
+#else /* H5_HAVE_FILTER_SCALEOFFSET */
+ const char *not_supported= "Scaleoffset filter is not enabled. Can't create the dataset.";
+
+ puts(not_supported);
+#endif /* H5_HAVE_FILTER_SCALEOFFSET */
+
+ return 0;
+
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dcpl);
+ H5Dclose(dataset);
+ } H5E_END_TRY;
+
+ return -1;
+#endif /* H5_HAVE_FILTER_SCALEOFFSET */
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_scale_offset_dsets_int
+ *
+ * Purpose: Create a dataset of INT datatype with scale-offset filter
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Raymond Lu
+ * 21 January 2011
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+create_scale_offset_dsets_int(hid_t fid, hid_t fsid, hid_t msid)
+{
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+ hid_t dataset; /* dataset handles */
+ hid_t dcpl;
+ int data[NX][NY]; /* data to write */
+ int fillvalue = -2;
+ hsize_t chunk[RANK] = {CHUNK0, CHUNK1};
+ int i, j;
+
+ /*
+ * Data and output buffer initialization.
+ */
+ for (j = 0; j < NX; j++) {
+ for (i = 0; i < NY; i++)
+ data[j][i] = i + j;
+ }
+ /*
+ * 0 1 2 3 4 5
+ * 1 2 3 4 5 6
+ * 2 3 4 5 6 7
+ * 3 4 5 6 7 8
+ * 4 5 6 7 8 9
+ * 5 6 7 8 9 10
+ */
+
+ /*
+ * Create the dataset creation property list, add the Scale-Offset
+ * filter, set the chunk size, and set the fill value.
+ */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR
+ if(H5Pset_scaleoffset(dcpl, H5Z_SO_INT, H5Z_SO_INT_MINBITS_DEFAULT) < 0)
+ TEST_ERROR
+ if(H5Pset_chunk(dcpl, RANK, chunk) < 0)
+ TEST_ERROR
+ if(H5Pset_fill_value(dcpl, H5T_NATIVE_INT, &fillvalue) < 0)
+ TEST_ERROR
+
+ /*
+ * Create a new dataset within the file using defined dataspace, little
+ * endian datatype and default dataset creation properties.
+ */
+ if((dataset = H5Dcreate2(fid, DATASETNAME10, H5T_STD_I32LE, fsid,
+ H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /*
+ * Write the data to the dataset using default transfer properties.
+ */
+ if(H5Dwrite(dataset, H5T_NATIVE_INT, msid, fsid, H5P_DEFAULT, data) < 0)
+ TEST_ERROR
+
+ /* Close dataset */
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /* Now create a dataset with a big-endian type */
+ if((dataset = H5Dcreate2(fid, DATASETNAME11, H5T_STD_I32BE, fsid,
+ H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+ if(H5Dwrite(dataset, H5T_NATIVE_INT, msid, fsid, H5P_DEFAULT, data) < 0)
+ TEST_ERROR
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /*
+ * Close/release resources.
+ */
+ if(H5Pclose(dcpl) < 0)
+ TEST_ERROR
+
+#else /* H5_HAVE_FILTER_SCALEOFFSET */
+ const char *not_supported= "Scaleoffset filter is not enabled. Can't create the dataset.";
+
+ puts(not_supported);
+#endif /* H5_HAVE_FILTER_SCALEOFFSET */
+
+ return 0;
+
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dcpl);
+ H5Dclose(dataset);
+ } H5E_END_TRY;
+
+ return -1;
+#endif /* H5_HAVE_FILTER_SCALEOFFSET */
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_scale_offset_dsets_long_long
+ *
+ * Purpose: Create a dataset of LONG LONG datatype with scale-offset
+ * filter
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Neil Fortner
+ * 27 January 2011
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+create_scale_offset_dsets_long_long(hid_t fid, hid_t fsid, hid_t msid)
+{
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+ hid_t dataset; /* dataset handles */
+ hid_t dcpl;
+ long long data[NX][NY]; /* data to write */
+ long long fillvalue = -2;
+ hsize_t chunk[RANK] = {CHUNK0, CHUNK1};
+ int i, j;
+
+ /*
+ * Data and output buffer initialization.
+ */
+ for (j = 0; j < NX; j++) {
+ for (i = 0; i < NY; i++)
+ data[j][i] = i + j;
+ }
+ /*
+ * 0 1 2 3 4 5
+ * 1 2 3 4 5 6
+ * 2 3 4 5 6 7
+ * 3 4 5 6 7 8
+ * 4 5 6 7 8 9
+ * 5 6 7 8 9 10
+ */
+
+ /*
+ * Create the dataset creation property list, add the Scale-Offset
+ * filter, set the chunk size, and set the fill value.
+ */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR
+ if(H5Pset_scaleoffset(dcpl, H5Z_SO_INT, H5Z_SO_INT_MINBITS_DEFAULT) < 0)
+ TEST_ERROR
+ if(H5Pset_chunk(dcpl, RANK, chunk) < 0)
+ TEST_ERROR
+ if(H5Pset_fill_value(dcpl, H5T_NATIVE_LLONG, &fillvalue) < 0)
+ TEST_ERROR
+
+ /*
+ * Create a new dataset within the file using defined dataspace, little
+ * endian datatype and default dataset creation properties.
*/
- dataset = H5Dcreate2(file, DATASETNAME, datatype, dataspace,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ if((dataset = H5Dcreate2(fid, DATASETNAME12, H5T_STD_I64LE, fsid,
+ H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ TEST_ERROR
/*
* Write the data to the dataset using default transfer properties.
*/
- status = H5Dwrite(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL,
- H5P_DEFAULT, data);
+ if(H5Dwrite(dataset, H5T_NATIVE_LLONG, msid, fsid, H5P_DEFAULT, data) < 0)
+ TEST_ERROR
+
+ /* Close dataset */
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /* Now create a dataset with a big-endian type */
+ if((dataset = H5Dcreate2(fid, DATASETNAME13, H5T_STD_I64BE, fsid,
+ H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+ if(H5Dwrite(dataset, H5T_NATIVE_LLONG, msid, fsid, H5P_DEFAULT, data) < 0)
+ TEST_ERROR
+ if(H5Dclose(dataset) < 0)
+ TEST_ERROR
+
+ /*
+ * Close/release resources.
+ */
+ if(H5Pclose(dcpl) < 0)
+ TEST_ERROR
+
+#else /* H5_HAVE_FILTER_SCALEOFFSET */
+ const char *not_supported= "Scaleoffset filter is not enabled. Can't create the dataset.";
+
+ puts(not_supported);
+#endif /* H5_HAVE_FILTER_SCALEOFFSET */
+
+ return 0;
+
+#ifdef H5_HAVE_FILTER_SCALEOFFSET
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dcpl);
+ H5Dclose(dataset);
+ } H5E_END_TRY;
+
+ return -1;
+#endif /* H5_HAVE_FILTER_SCALEOFFSET */
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: Create a file for cross_read.c test.
+ *
+ * Return: Success: exit(0)
+ * Failure: exit(1)
+ *
+ * Programmer: Raymond Lu
+ * Some time ago
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main (void)
+{
+ hid_t file = -1;
+ hid_t filespace = -1;
+ hid_t memspace = -1;
+ hsize_t dimsf[RANK];
+ hsize_t start[RANK] = {0, 0};
+
+ /*
+ * Create a new file using H5F_ACC_TRUNC access,
+ * default file creation properties, and default file
+ * access properties.
+ */
+ if((file = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT))
+ < 0)
+ {H5_FAILED(); AT(); return 1;}
+
+ /*
+ * Describe the size of the array and create the data space for fixed
+ * size dataset. Increase the size in the X direction to have some fill
+ * values.
+ */
+ dimsf[0] = NX + 1;
+ dimsf[1] = NY;
+ if((filespace = H5Screate_simple(RANK, dimsf, NULL)) < 0)
+ {H5_FAILED(); AT(); return 1;}
+ dimsf[0] = NX;
+ if(H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, NULL, dimsf, NULL)
+ < 0)
+ {H5_FAILED(); AT(); return 1;}
+
+ /* Create memory space. This does not include the extra row for fill
+ * values. */
+ HDassert(dimsf[0] == NX);
+ HDassert(dimsf[1] == NY);
+ if((memspace = H5Screate_simple(RANK, dimsf, NULL)) < 0)
+ {H5_FAILED(); AT(); return 1;}
+
+ /* Create a regular dataset */
+ if(create_normal_dset(file, filespace, memspace) < 0)
+ {H5_FAILED(); AT(); return 1;}
+
+ /* Create a dataset of FLOAT with scale-offset filter */
+ if(create_scale_offset_dsets_float(file, filespace, memspace) < 0)
+ {H5_FAILED(); AT(); return 1;}
+
+ /* Create a dataset of DOUBLE with scale-offset filter */
+ if(create_scale_offset_dsets_double(file, filespace, memspace) < 0)
+ {H5_FAILED(); AT(); return 1;}
+
+ /* Create a dataset of CHAR with scale-offset filter */
+ if(create_scale_offset_dsets_char(file, filespace, memspace) < 0)
+ {H5_FAILED(); AT(); return 1;}
+
+ /* Create a dataset of SHORT with scale-offset filter */
+ if(create_scale_offset_dsets_short(file, filespace, memspace) < 0)
+ {H5_FAILED(); AT(); return 1;}
+
+ /* Create a dataset of INT with scale-offset filter */
+ if(create_scale_offset_dsets_int(file, filespace, memspace) < 0)
+ {H5_FAILED(); AT(); return 1;}
+
+ /* Create a dataset of LONG LONG with scale-offset filter */
+ if(create_scale_offset_dsets_long_long(file, filespace, memspace) < 0)
+ {H5_FAILED(); AT(); return 1;}
/*
* Close/release resources.
*/
- H5Sclose(dataspace);
- H5Tclose(datatype);
- H5Dclose(dataset);
- H5Fclose(file);
+ if(H5Sclose(memspace) < 0)
+ {H5_FAILED(); AT(); return 1;}
+ if(H5Sclose(filespace) < 0)
+ {H5_FAILED(); AT(); return 1;}
+ if(H5Fclose(file) < 0)
+ {H5_FAILED(); AT(); return 1;}
return 0;
}
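
gen_cross.c now writes every scale-offset dataset twice, once with a little-endian and once with a big-endian file datatype, into a 7x6 file dataspace whose extra row holds fill values, and the generated file feeds the cross-platform read test (cross_read.c in the diffstat). A reader-side sketch for one of the new datasets, assuming the file name and verification details, would look like:

/* Assumed reader-side sketch for one generated dataset */
float rbuf[NX + 1][NY];              /* the extra row holds the fill value */
hid_t file = H5Fopen("le_data.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
hid_t dset = H5Dopen2(file, "Scale_offset_float_data_le", H5P_DEFAULT);

/* The library converts the stored (little-endian, scale-offset encoded)
 * values to native float during the read, regardless of host endianness */
if(H5Dread(dset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
    TEST_ERROR

if(H5Dclose(dset) < 0) TEST_ERROR
if(H5Fclose(file) < 0) TEST_ERROR
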
diff --git a/test/h5test.c b/test/h5test.c
index 229efec..25b751f 100644
--- a/test/h5test.c
+++ b/test/h5test.c
@@ -27,11 +27,11 @@
#include "h5test.h"
#include "H5srcdir.h"
-#ifdef _WIN32
+#ifdef H5_HAVE_WINSOCK_H
#include <process.h>
#include <direct.h>
#include <winsock2.h>
-#endif /* _WIN32 */
+#endif /* H5_HAVE_WINSOCK_H */
/*
* Define these environment variables or constants to influence functions in
@@ -648,7 +648,7 @@ void
h5_show_hostname(void)
{
char hostname[80];
-#ifdef _WIN32
+#ifdef H5_HAVE_WINSOCK_H
WSADATA wsaData;
int err;
#endif
@@ -670,7 +670,7 @@ h5_show_hostname(void)
#else
printf("thread 0.");
#endif
-#ifdef _WIN32
+#ifdef H5_HAVE_WINSOCK_H
err = WSAStartup( MAKEWORD(2,2), &wsaData );
if ( err != 0 ) {
@@ -700,7 +700,7 @@ h5_show_hostname(void)
#else
printf(" gethostname not supported\n");
#endif
-#ifdef _WIN32
+#ifdef H5_HAVE_WINSOCK_H
WSACleanup();
#endif
}
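
The h5test.c hunks replace the platform check (_WIN32) with the configure-time feature check (H5_HAVE_WINSOCK_H) around the Winsock-based hostname reporting, so the code follows what the build actually detected. The resulting guard pattern, condensed from the diff (the failure message below is paraphrased):

#ifdef H5_HAVE_WINSOCK_H
    WSADATA wsaData;

    /* Initialize Winsock before calling gethostname() on Windows */
    if(WSAStartup(MAKEWORD(2, 2), &wsaData) != 0)
        printf("WSAStartup failed\n");
#endif

    /* ... gethostname()-based reporting elided ... */

#ifdef H5_HAVE_WINSOCK_H
    WSACleanup();
#endif
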
diff --git a/test/h5test.h b/test/h5test.h
index 70408d9..0467cd7 100644
--- a/test/h5test.h
+++ b/test/h5test.h
@@ -83,7 +83,7 @@
* This contains the filename prefix specificied as command line option for
* the parallel test files.
*/
-H5_DLLVAR char *paraprefix;
+H5TEST_DLLVAR char *paraprefix;
#ifdef H5_HAVE_PARALLEL
extern MPI_Info h5_io_info_g; /* MPI INFO object for IO */
#endif
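
In h5test.h, paraprefix switches from the core library's H5_DLLVAR to a test-library-specific H5TEST_DLLVAR, so shared builds export the variable from the test support library rather than from the HDF5 DLL. The actual macro is defined elsewhere in the HDF5 headers; the sketch below is purely illustrative of how such an export/import pair is typically arranged, and the guard macro names in it are hypothetical:

/* Hypothetical illustration only -- not the actual HDF5 definition */
#if defined(_WIN32) && defined(BUILT_AS_DYNAMIC_LIB)
#  ifdef COMPILING_H5TEST_LIB
#    define H5TEST_DLLVAR extern __declspec(dllexport)
#  else
#    define H5TEST_DLLVAR extern __declspec(dllimport)
#  endif
#else
#  define H5TEST_DLLVAR extern
#endif
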
diff --git a/test/le_data.h5 b/test/le_data.h5
index c944c9f..1225d21 100644
--- a/test/le_data.h5
+++ b/test/le_data.h5
Binary files differ
diff --git a/test/links.c b/test/links.c
index 12b8ba4..c8ece99 100644
--- a/test/links.c
+++ b/test/links.c
@@ -2640,6 +2640,8 @@ external_link_dangling(hid_t fapl, hbool_t new_format)
{
hid_t fid = (-1); /* File ID */
hid_t gid = (-1); /* Group IDs */
+ hid_t rid = (-1); /* Root Group ID */
+ hid_t status = (-1); /* Status */
char filename1[NAME_BUF_SIZE],
filename2[NAME_BUF_SIZE]; /* Names of files to externally link across */
@@ -2672,6 +2674,9 @@ external_link_dangling(hid_t fapl, hbool_t new_format)
/* Open first file */
if((fid=H5Fopen(filename1, H5F_ACC_RDWR, fapl)) < 0) TEST_ERROR
+ /* Get root group ID */
+ if((rid=H5Gopen2(fid, "/", H5P_DEFAULT)) < 0) TEST_ERROR;
+
/* Open object through dangling file external link */
H5E_BEGIN_TRY {
gid = H5Gopen2(fid, "no_file", H5P_DEFAULT);
@@ -2692,125 +2697,36 @@ external_link_dangling(hid_t fapl, hbool_t new_format)
goto error;
}
- /* Close first file */
- if(H5Fclose(fid) < 0) TEST_ERROR
-
-
- PASSED();
- return 0;
-
- error:
+ /* Try to get name of object by index through dangling file external link */
H5E_BEGIN_TRY {
- H5Gclose (gid);
- H5Fclose (fid);
+ status = H5Lget_name_by_idx(rid, "no_file", H5_INDEX_NAME, H5_ITER_INC, 0, NULL, 0, H5P_DEFAULT);
} H5E_END_TRY;
- return -1;
-} /* end external_link_dangling() */
-
-
-
-/*-------------------------------------------------------------------------
- * Function: external_link_env: test 1
- *
- * Purpose:
- * 1. target link: "extlinks1"
- * 2. main file: "extlinks0"
- * 3. target file: "tmp/extlinks1"
- * 4. The environment variable "HDF5_EXT_PREFIX" should be set to ".:tmp"
- * Should be able to access the target file in tmp directory through searching
- * the pathnames set in HDF5_EXT_PREFIX.
- * This test will be skipped if HDF5_EXT_PREFIX is not set as expected.
- *
- *
- * Return: Success: 0
- * Failure: -1
- *
- * Programmer: Vailin Choi
- * Feb. 20, 2008
- *
- * Modifications:
- *
- *-------------------------------------------------------------------------
- */
-static int
-external_link_env(hid_t fapl, hbool_t new_format)
-{
- hid_t fid = (-1); /* File ID */
- hid_t gid = (-1); /* Group IDs */
- const char *envval = NULL;
-
- char filename1[NAME_BUF_SIZE],
- filename2[NAME_BUF_SIZE],
- filename3[NAME_BUF_SIZE];
-
- if(new_format)
- TESTING("external links via environment variable (w/new group format)")
- else
- TESTING("external links via environment variable")
-
- if ((envval = HDgetenv("HDF5_EXT_PREFIX")) == NULL)
- envval = "nomatch";
- if (HDstrcmp(envval, ".:tmp")) {
- SKIPPED();
- return(0);
- }
-
- /* set up name for main file:"extlinks0" */
- h5_fixname(FILENAME[12], fapl, filename1, sizeof filename1);
- /* set up name for external linked target file: "extlinks1" */
- h5_fixname(FILENAME[14], fapl, filename2, sizeof filename2);
-
- if(HDmkdir(TMPDIR, (mode_t)0755) < 0 && errno != EEXIST)
- TEST_ERROR
-
- /* set up name for target file: "tmp/extlinks1" */
- h5_fixname(FILENAME[15], fapl, filename3, sizeof filename3);
+ if (status >= 0) {
+ H5_FAILED();
+ puts(" Retrieving name of object by index through dangling file external link should have failed.");
+ } /* end if */
- /* Create the target file */
- if((fid=H5Fcreate(filename3, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) TEST_ERROR
- if((gid=H5Gcreate2(fid, "A", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR
+ /* Close root group */
+ if(H5Gclose(rid) < 0) TEST_ERROR
- /* closing for target file */
- if(H5Gclose(gid) < 0) TEST_ERROR
+ /* Close first file */
if(H5Fclose(fid) < 0) TEST_ERROR
- /* Create the main file */
- if((fid=H5Fcreate(filename1, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) TEST_ERROR
-
- /* Create external link to target file */
- if(H5Lcreate_external(filename2, "/A", fid, "ext_link", H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
-
- /* Open object through external link */
- H5E_BEGIN_TRY {
- gid = H5Gopen2(fid, "ext_link", H5P_DEFAULT);
- } H5E_END_TRY;
-
- /* should be able to find the target file from pathnames set via environment variable */
- if (gid < 0) {
- H5_FAILED();
- puts(" Should have found the file in tmp directory.");
- goto error;
- }
-
- /* closing for main file */
- if(H5Gclose(gid) < 0) TEST_ERROR
- if(H5Fclose(fid) < 0) TEST_ERROR
-
PASSED();
return 0;
error:
H5E_BEGIN_TRY {
- H5Gclose (gid);
- H5Fclose (fid);
+ H5Gclose (gid);
+ H5Fclose (fid);
} H5E_END_TRY;
return -1;
-} /* end external_link_env() */
+} /* end external_link_dangling() */
/*-------------------------------------------------------------------------
- * Function: external_link_prefix: test 2
+ * Function: external_link_prefix
*
* Purpose: 1. target link: "extlinks2"
* 2. main file: "extlinks0"
@@ -4033,6 +3949,7 @@ external_set_elink_acc_flags(hid_t fapl, hbool_t new_format)
hid_t file1 = -1, file2 = -1, group = -1, subgroup = -1, gapl = -1;
char filename1[NAME_BUF_SIZE],
filename2[NAME_BUF_SIZE];
+ herr_t ret;
unsigned flags;
if(new_format)
@@ -4083,6 +4000,24 @@ external_set_elink_acc_flags(hid_t fapl, hbool_t new_format)
} H5E_END_TRY;
if(subgroup != FAIL) TEST_ERROR
+ /* Attempt to set invalid flags on gapl */
+ H5E_BEGIN_TRY {
+ ret = H5Pset_elink_acc_flags(gapl, H5F_ACC_TRUNC);
+ } H5E_END_TRY;
+ if(ret != FAIL) TEST_ERROR
+ H5E_BEGIN_TRY {
+ ret = H5Pset_elink_acc_flags(gapl, H5F_ACC_EXCL);
+ } H5E_END_TRY;
+ if(ret != FAIL) TEST_ERROR
+ H5E_BEGIN_TRY {
+ ret = H5Pset_elink_acc_flags(gapl, H5F_ACC_DEBUG);
+ } H5E_END_TRY;
+ if(ret != FAIL) TEST_ERROR
+ H5E_BEGIN_TRY {
+ ret = H5Pset_elink_acc_flags(gapl, H5F_ACC_CREAT);
+ } H5E_END_TRY;
+ if(ret != FAIL) TEST_ERROR
+
/* Close file1 and group */
if(H5Gclose(group) < 0) TEST_ERROR
if(H5Fclose(file1) < 0) TEST_ERROR
@@ -13996,7 +13931,6 @@ main(void)
nerrors += external_link_strong(my_fapl, new_format) < 0 ? 1 : 0;
/* tests for external link */
- nerrors += external_link_env(my_fapl, new_format) < 0 ? 1 : 0;
nerrors += external_link_prefix(my_fapl, new_format) < 0 ? 1 : 0;
nerrors += external_link_abs_mainpath(my_fapl, new_format) < 0 ? 1 : 0;
nerrors += external_link_rel_mainpath(my_fapl, new_format) < 0 ? 1 : 0;
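
For reference on the checks added in external_set_elink_acc_flags() above: H5Pset_elink_acc_flags() only accepts H5F_ACC_RDWR, H5F_ACC_RDONLY or H5F_ACC_DEFAULT, so file-creation flags such as H5F_ACC_TRUNC must be rejected. A minimal sketch of the valid call, assuming a group access property list like the test's gapl:

    hid_t  gapl = H5Pcreate(H5P_GROUP_ACCESS);
    herr_t ret;

    /* External files reached through links opened with this plist are forced read-only */
    ret = H5Pset_elink_acc_flags(gapl, H5F_ACC_RDONLY);     /* succeeds */
    ret = H5Pset_elink_acc_flags(gapl, H5F_ACC_TRUNC);      /* fails, as the new test expects */
    H5Pclose(gapl);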
diff --git a/test/links_env.c b/test/links_env.c
new file mode 100644
index 0000000..c792386
--- /dev/null
+++ b/test/links_env.c
@@ -0,0 +1,192 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Purpose: Tests external links accessed via the HDF5_EXT_PREFIX environment variable.
+ */
+
+#define H5G_PACKAGE
+#define H5G_TESTING
+
+#include "h5test.h"
+#include "H5srcdir.h"
+#include "H5Gpkg.h" /* Groups */
+#include "H5Iprivate.h" /* IDs */
+#include "H5Lprivate.h" /* Links */
+
+#ifdef H5_VMS
+#define TMPDIR "[.tmp]"
+#else /* H5_VMS */
+#define TMPDIR "tmp/"
+#endif /* H5_VMS */
+#define NAME_BUF_SIZE 1024
+
+const char *FILENAME[] = {
+ "extlinks_env0", /* 0: main file */
+ "extlinks_env1", /* 1: target file */
+ TMPDIR "extlinks_env1", /* 2 */
+ NULL
+};
+
+static int external_link_env(hid_t fapl, hbool_t new_format);
+
+
+/*-------------------------------------------------------------------------
+ * Function: external_link_env (moved from links.c)
+ *
+ * Purpose: Verify that the target file is successfully found in the "tmp" directory
+ * by searching the pathnames set in the environment variable HDF5_EXT_PREFIX.
+ * 1. Target link: "extlinks_env1"
+ * 2. Main file: "extlinks_env0"
+ * 3. Target file is created in: "tmp/extlinks_env1"
+ * 4. The environment variable "HDF5_EXT_PREFIX" is set to ".:tmp"
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Vailin Choi
+ * Feb. 20, 2008
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+external_link_env(hid_t fapl, hbool_t new_format)
+{
+ hid_t fid = (-1); /* File ID */
+ hid_t gid = (-1); /* Group IDs */
+ const char *envval = NULL; /* Pointer to environment variable */
+ char filename1[NAME_BUF_SIZE],
+ filename2[NAME_BUF_SIZE],
+ filename3[NAME_BUF_SIZE]; /* Holders for filename */
+
+ if(new_format)
+ TESTING("external links via environment variable (w/new group format)")
+ else
+ TESTING("external links via environment variable")
+
+ if ((envval = HDgetenv("HDF5_EXT_PREFIX")) == NULL)
+ envval = "nomatch";
+ if (HDstrcmp(envval, ".:tmp")) TEST_ERROR
+
+ /* Set up name for main file:"extlinks_env0" */
+ h5_fixname(FILENAME[0], fapl, filename1, sizeof filename1);
+
+ /* Set up name for external linked target file: "extlinks_env1" */
+ h5_fixname(FILENAME[1], fapl, filename2, sizeof filename2);
+
+ /* Create "tmp" directory */
+ if(HDmkdir(TMPDIR, (mode_t)0755) < 0 && errno != EEXIST)
+ TEST_ERROR
+
+ /* Set up name (location) for the target file: "tmp/extlinks_env1" */
+ h5_fixname(FILENAME[2], fapl, filename3, sizeof filename3);
+
+ /* Create the target file in "tmp" directory */
+ if((fid=H5Fcreate(filename3, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) TEST_ERROR
+ if((gid=H5Gcreate2(fid, "A", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* Closing for target file */
+ if(H5Gclose(gid) < 0) TEST_ERROR
+ if(H5Fclose(fid) < 0) TEST_ERROR
+
+
+ /* Create the main file */
+ if((fid=H5Fcreate(filename1, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) TEST_ERROR
+
+ /* Create external link to target file */
+ if(H5Lcreate_external(filename2, "/A", fid, "ext_link", H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+
+ /* Open object through external link */
+ H5E_BEGIN_TRY {
+ gid = H5Gopen2(fid, "ext_link", H5P_DEFAULT);
+ } H5E_END_TRY;
+
+ /* Should be able to find the target file from pathnames set via HDF5_EXT_PREFIX */
+ if (gid < 0) {
+ H5_FAILED();
+ puts(" Should have found the file in tmp directory.");
+ goto error;
+ }
+
+ /* closing for main file */
+ if(H5Gclose(gid) < 0) TEST_ERROR
+ if(H5Fclose(fid) < 0) TEST_ERROR
+
+ PASSED();
+ return 0;
+
+ error:
+ H5E_BEGIN_TRY {
+ H5Gclose (gid);
+ H5Fclose (fid);
+ } H5E_END_TRY;
+ return -1;
+} /* end external_link_env() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: Test external link with environment variable HDF5_EXT_PREFIX
+ *
+ * Return: Success: exit(0)
+ * Failure: exit(non-zero)
+ *
+ * Programmer: Vailin Choi; Nov 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(void)
+{
+ hid_t fapl; /* File access property lists */
+ int nerrors = 0; /* Error from tests */
+ const char *env_h5_drvr; /* File Driver value from environment */
+
+ env_h5_drvr = HDgetenv("HDF5_DRIVER");
+ if(env_h5_drvr == NULL)
+ env_h5_drvr = "nomatch";
+
+ h5_reset();
+ fapl = h5_fileaccess();
+
+ nerrors += external_link_env(fapl, FALSE) < 0 ? 1 : 0;
+
+ /* Set the "use the latest version of the format" bounds for creating objects in the file */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) TEST_ERROR
+
+ nerrors += external_link_env(fapl, TRUE) < 0 ? 1 : 0;
+
+ h5_cleanup(FILENAME, fapl);
+
+ /* Results */
+ if(nerrors) {
+ printf("***** %d External Link (HDF5_EXT_PREFIX) test%s FAILED! *****\n",
+ nerrors, 1 == nerrors ? "" : "s");
+ exit(1);
+ }
+ printf("All external Link (HDF5_EXT_PREFIX) tests passed.\n");
+
+ /* clean up tmp directory created by external link tests */
+ HDrmdir(TMPDIR);
+
+ return 0;
+
+error:
+ puts("*** TESTS FAILED ***");
+ return 1;
+}
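
Besides the HDF5_EXT_PREFIX environment variable that this new test exercises, the same search prefix can be set programmatically on a link/group access property list; a minimal sketch, borrowing fid and the "ext_link" name from the test above:

    hid_t gapl = H5Pcreate(H5P_GROUP_ACCESS);

    /* Same effect as HDF5_EXT_PREFIX=tmp, but scoped to this property list */
    if(H5Pset_elink_prefix(gapl, "tmp") < 0) TEST_ERROR
    if((gid = H5Gopen2(fid, "ext_link", gapl)) < 0) TEST_ERROR
    if(H5Gclose(gid) < 0) TEST_ERROR
    if(H5Pclose(gapl) < 0) TEST_ERROR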
diff --git a/test/mf.c b/test/mf.c
index 6bfa44e..2b55417 100644
--- a/test/mf.c
+++ b/test/mf.c
@@ -178,7 +178,7 @@ static unsigned
test_mf_eoa(const char *env_h5_drvr, hid_t fapl)
{
hid_t file = -1; /* File ID */
- hid_t fapl_new; /* copy of fapl */
+ hid_t fapl_new = -1; /* copy of fapl */
char filename[FILENAME_LEN]; /* Filename to use */
H5F_t *f = NULL; /* Internal file object pointer */
h5_stat_size_t file_size, new_file_size; /* file size */
@@ -277,9 +277,12 @@ test_mf_eoa(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if (new_file_size != file_size)
+ if(new_file_size != file_size)
TEST_ERROR
+ if(H5Pclose(fapl_new) < 0)
+ FAIL_STACK_ERROR
+
PASSED()
} /* end if */
else {
@@ -291,6 +294,7 @@ test_mf_eoa(const char *env_h5_drvr, hid_t fapl)
error:
H5E_BEGIN_TRY {
+ H5Pclose(fapl_new);
H5Fclose(file);
} H5E_END_TRY;
return(1);
@@ -317,7 +321,7 @@ static unsigned
test_mf_eoa_shrink(const char *env_h5_drvr, hid_t fapl)
{
hid_t file = -1; /* File ID */
- hid_t fapl_new; /* copy of fapl */
+ hid_t fapl_new = -1; /* copy of fapl */
char filename[FILENAME_LEN]; /* Filename to use */
H5F_t *f = NULL; /* Internal file object pointer */
h5_stat_size_t file_size, new_file_size; /* file size */
@@ -403,8 +407,10 @@ test_mf_eoa_shrink(const char *env_h5_drvr, hid_t fapl)
/* nothing should be changed in meta_aggr */
H5MF_aggr_query(f, &(f->shared->meta_aggr), &new_ma_addr, &ma_size);
- if (new_ma_addr != ma_addr) TEST_ERROR
- if (new_ma_size != ma_size) TEST_ERROR
+ if(new_ma_addr != ma_addr)
+ TEST_ERROR
+ if(new_ma_size != ma_size)
+ TEST_ERROR
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -414,7 +420,7 @@ test_mf_eoa_shrink(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if (new_file_size != file_size)
+ if(new_file_size != file_size)
TEST_ERROR
PASSED()
@@ -462,7 +468,7 @@ test_mf_eoa_shrink(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if (new_file_size != (file_size+TEST_BLOCK_SIZE30))
+ if(new_file_size != (file_size + TEST_BLOCK_SIZE30))
TEST_ERROR
PASSED()
@@ -506,7 +512,7 @@ test_mf_eoa_shrink(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if (new_file_size != (file_size+TEST_BLOCK_SIZE30))
+ if(new_file_size != (file_size + TEST_BLOCK_SIZE30))
TEST_ERROR
PASSED()
@@ -538,8 +544,10 @@ test_mf_eoa_shrink(const char *env_h5_drvr, hid_t fapl)
/* nothing should be changed in meta_aggr */
H5MF_aggr_query(f, &(f->shared->meta_aggr), &new_ma_addr, &ma_size);
- if (new_ma_addr != ma_addr) TEST_ERROR
- if (new_ma_size != ma_size) TEST_ERROR
+ if(new_ma_addr != ma_addr)
+ TEST_ERROR
+ if(new_ma_size != ma_size)
+ TEST_ERROR
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -549,9 +557,12 @@ test_mf_eoa_shrink(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if (new_file_size != (file_size+10))
+ if(new_file_size != (file_size + 10))
TEST_ERROR
+ if(H5Pclose(fapl_new) < 0)
+ FAIL_STACK_ERROR
+
PASSED()
} /* end if */
else {
@@ -563,6 +574,7 @@ test_mf_eoa_shrink(const char *env_h5_drvr, hid_t fapl)
error:
H5E_BEGIN_TRY {
+ H5Pclose(fapl_new);
H5Fclose(file);
} H5E_END_TRY;
return(1);
@@ -585,7 +597,7 @@ static unsigned
test_mf_eoa_extend(const char *env_h5_drvr, hid_t fapl)
{
hid_t file = -1; /* File ID */
- hid_t fapl_new; /* copy of fapl */
+ hid_t fapl_new = -1; /* copy of fapl */
char filename[FILENAME_LEN]; /* Filename to use */
H5F_t *f = NULL; /* Internal file object pointer */
h5_stat_size_t file_size, new_file_size; /* File size */
@@ -621,8 +633,10 @@ test_mf_eoa_extend(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Turn off using meta/small data aggregator */
- H5Pset_meta_block_size(fapl_new, (hsize_t)0);
- H5Pset_small_data_block_size(fapl_new, (hsize_t)0);
+ if(H5Pset_meta_block_size(fapl_new, (hsize_t)0) < 0)
+ FAIL_STACK_ERROR
+ if(H5Pset_small_data_block_size(fapl_new, (hsize_t)0) < 0)
+ FAIL_STACK_ERROR
/* Re-open the file with meta/small data setting */
if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl_new)) < 0)
@@ -652,7 +666,7 @@ test_mf_eoa_extend(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if (new_file_size != (file_size+TEST_BLOCK_SIZE30))
+ if(new_file_size != (file_size + TEST_BLOCK_SIZE30))
TEST_ERROR
/* Re-open the file */
@@ -682,7 +696,7 @@ test_mf_eoa_extend(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if (new_file_size != (file_size+TEST_BLOCK_SIZE30+TEST_BLOCK_SIZE50))
+ if(new_file_size != (file_size + TEST_BLOCK_SIZE30 + TEST_BLOCK_SIZE50))
TEST_ERROR
PASSED()
@@ -715,12 +729,12 @@ test_mf_eoa_extend(const char *env_h5_drvr, hid_t fapl)
type = H5FD_MEM_SUPER;
addr = H5MF_alloc(f, type, H5P_DATASET_XFER_DEFAULT, (hsize_t)TEST_BLOCK_SIZE30);
- if (addr < (haddr_t)file_size)
+ if(addr < (haddr_t)file_size)
TEST_ERROR
/* nothing should be changed in meta_aggr */
H5MF_aggr_query(f, &(f->shared->meta_aggr), &new_ma_addr, &new_ma_size);
- if (new_ma_addr != ma_addr)
+ if(new_ma_addr != ma_addr)
TEST_ERROR
extended = H5MF_try_extend(f, H5P_DATASET_XFER_DEFAULT, type, (haddr_t)addr, (hsize_t)(TEST_BLOCK_SIZE30-10), (hsize_t)(TEST_BLOCK_SIZE50));
@@ -742,9 +756,12 @@ test_mf_eoa_extend(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if (new_file_size != file_size+TEST_BLOCK_SIZE30)
+ if(new_file_size != file_size + TEST_BLOCK_SIZE30)
TEST_ERROR
+ if(H5Pclose(fapl_new) < 0)
+ FAIL_STACK_ERROR
+
PASSED()
} /* end if */
else {
@@ -756,6 +773,7 @@ test_mf_eoa_extend(const char *env_h5_drvr, hid_t fapl)
error:
H5E_BEGIN_TRY {
+ H5Pclose(fapl_new);
H5Fclose(file);
} H5E_END_TRY;
return(1);
@@ -947,7 +965,7 @@ static unsigned
test_mf_fs_start(hid_t fapl)
{
hid_t file = -1; /* File ID */
- hid_t fapl_new; /* copy of fapl */
+ hid_t fapl_new = -1; /* copy of fapl */
char filename[FILENAME_LEN]; /* Filename to use */
H5F_t *f = NULL; /* Internal file object pointer */
h5_stat_size_t file_size, new_file_size; /* file size */
@@ -1010,15 +1028,19 @@ test_mf_fs_start(hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if (new_file_size != file_size)
+ if(new_file_size != file_size)
TEST_ERROR
+ if(H5Pclose(fapl_new) < 0)
+ FAIL_STACK_ERROR
+
PASSED()
return(0);
error:
H5E_BEGIN_TRY {
+ H5Pclose(fapl_new);
H5Fclose(file);
} H5E_END_TRY;
return(1);
@@ -1054,7 +1076,7 @@ static unsigned
test_mf_fs_alloc_free(hid_t fapl)
{
hid_t file = -1; /* File ID */
- hid_t fapl_new; /* copy of fapl */
+ hid_t fapl_new = -1; /* copy of fapl */
char filename[FILENAME_LEN]; /* Filename to use */
H5F_t *f = NULL; /* Internal file object pointer */
h5_stat_size_t file_size, new_file_size; /* file size */
@@ -1343,15 +1365,19 @@ test_mf_fs_alloc_free(hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if (new_file_size != file_size)
+ if(new_file_size != file_size)
TEST_ERROR
+ if(H5Pclose(fapl_new) < 0)
+ FAIL_STACK_ERROR
+
PASSED()
return(0);
error:
H5E_BEGIN_TRY {
+ H5Pclose(fapl_new);
H5Fclose(file);
} H5E_END_TRY;
return(1);
@@ -1399,7 +1425,7 @@ static unsigned
test_mf_fs_extend(hid_t fapl)
{
hid_t file = -1; /* File ID */
- hid_t fapl_new; /* copy of fapl */
+ hid_t fapl_new = -1; /* copy of fapl */
char filename[FILENAME_LEN]; /* Filename to use */
H5F_t *f = NULL; /* Internal file object pointer */
h5_stat_size_t file_size, new_file_size; /* file size */
@@ -1889,15 +1915,19 @@ test_mf_fs_extend(hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if (new_file_size != file_size)
+ if(new_file_size != file_size)
TEST_ERROR
+ if(H5Pclose(fapl_new) < 0)
+ FAIL_STACK_ERROR
+
PASSED()
return(0);
error:
H5E_BEGIN_TRY {
+ H5Pclose(fapl_new);
H5Fclose(file);
} H5E_END_TRY;
return(1);
@@ -6635,7 +6665,7 @@ error:
HDmemset(memb_name, 0, sizeof memb_name); \
HDmemset(memb_addr, 0, sizeof memb_addr); \
HDmemset(sv, 0, sizeof sv); \
- for (mt = 0; mt < H5FD_MEM_NTYPES; mt++) { \
+ for(mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, mt)) { \
memb_map[mt] = H5FD_MEM_SUPER; \
memb_fapl[mt] = H5P_DEFAULT; \
} \
@@ -6670,20 +6700,21 @@ error:
static unsigned
test_mf_fs_drivers(hid_t fapl)
{
- hid_t fcpl; /* file creation property list */
- hid_t fapl_new; /* copy of file access property list */
- hid_t fapl2; /* copy of file access property list */
+ hid_t fcpl = -1; /* file creation property list */
+ hid_t fapl_new = -1; /* copy of file access property list */
+ hid_t fapl2 = -1; /* copy of file access property list */
hbool_t new_format; /* To use new library format or not */
unsigned ret = 0; /* return value */
H5FD_mem_t memb_map[H5FD_MEM_NTYPES]; /* Memory usage map */
hid_t memb_fapl[H5FD_MEM_NTYPES]; /* Member access properties */
- char sv[H5FD_MEM_NTYPES][500]; /* Name generators */
+ char sv[H5FD_MEM_NTYPES][64]; /* Name generators */
const char *memb_name[H5FD_MEM_NTYPES]; /* Name generators */
haddr_t memb_addr[H5FD_MEM_NTYPES]; /* Member starting address */
/* Create a non-standard file-creation template */
- fcpl = H5Pcreate(H5P_FILE_CREATE);
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ FAIL_STACK_ERROR
if(H5Pset_file_space(fcpl, H5F_FILE_SPACE_ALL_PERSIST, (hsize_t)0) < 0)
TEST_ERROR
@@ -6784,14 +6815,19 @@ test_mf_fs_drivers(hid_t fapl)
} /* end for new_format */
- if (H5Pclose(fcpl) < 0)
+ if(H5Pclose(fcpl) < 0)
FAIL_STACK_ERROR
- if (H5Pclose(fapl2) < 0)
+ if(H5Pclose(fapl2) < 0)
FAIL_STACK_ERROR
return(ret);
error:
+ H5E_BEGIN_TRY {
+ H5Pclose(fcpl);
+ H5Pclose(fapl2);
+ H5Pclose(fapl_new);
+ } H5E_END_TRY;
return(1);
} /* test_mf_fs_drivers() */
@@ -6804,7 +6840,7 @@ static unsigned
test_filespace_strategy_threshold(hid_t fapl_new)
{
hid_t file = -1; /* File ID */
- hid_t fcpl; /* File creation property list template */
+ hid_t fcpl = -1; /* File creation property list template */
char filename[FILENAME_LEN]; /* Filename to use */
H5F_t *f = NULL; /* Internal file object pointer */
H5FD_mem_t type; /* File allocation type */
@@ -6922,7 +6958,10 @@ test_filespace_strategy_threshold(hid_t fapl_new)
TEST_ERROR
break;
+ case H5F_FILE_SPACE_DEFAULT:
+ case H5F_FILE_SPACE_NTYPES:
default:
+ TEST_ERROR
break;
} /* end switch */
@@ -6941,6 +6980,7 @@ test_filespace_strategy_threshold(hid_t fapl_new)
error:
H5E_BEGIN_TRY {
+ H5Pclose(fcpl);
H5Fclose(file);
} H5E_END_TRY;
return(1);
@@ -6954,7 +6994,7 @@ static unsigned
test_filespace_gone(hid_t fapl_new)
{
hid_t file = -1; /* File ID */
- hid_t fcpl; /* File creation propertly list template */
+ hid_t fcpl = -1; /* File creation property list template */
char filename[FILENAME_LEN]; /* Filename to use */
H5F_t *f = NULL; /* Internal file object pointer */
H5FD_mem_t type; /* File allocation type */
@@ -7074,6 +7114,7 @@ test_filespace_gone(hid_t fapl_new)
error:
H5E_BEGIN_TRY {
+ H5Pclose(fcpl);
H5Fclose(file);
} H5E_END_TRY;
return(1);
@@ -7085,14 +7126,14 @@ error:
static unsigned
test_filespace_drivers(hid_t fapl)
{
- hid_t fapl_new; /* copy of file access property list */
- hid_t fapl2; /* copy of file access property list */
+ hid_t fapl_new = -1; /* copy of file access property list */
+ hid_t fapl2 = -1; /* copy of file access property list */
hbool_t new_format; /* Using library new format or not */
unsigned ret = 0; /* return value */
H5FD_mem_t memb_map[H5FD_MEM_NTYPES]; /* Memory usage map */
hid_t memb_fapl[H5FD_MEM_NTYPES]; /* Member access properties */
- char sv[H5FD_MEM_NTYPES][500]; /* Name generators */
+ char sv[H5FD_MEM_NTYPES][64]; /* Name generators */
const char *memb_name[H5FD_MEM_NTYPES]; /* Name generators */
haddr_t memb_addr[H5FD_MEM_NTYPES]; /* Member starting address */
@@ -7201,6 +7242,10 @@ test_filespace_drivers(hid_t fapl)
return(ret);
error:
+ H5E_BEGIN_TRY {
+ H5Pclose(fapl_new);
+ H5Pclose(fapl2);
+ } H5E_END_TRY;
return(1);
} /* test_filespace_drivers() */
@@ -7259,10 +7304,9 @@ main(void)
nerrors += test_mf_aggr_absorb(env_h5_drvr, fapl);
/* Tests for alignment */
- for(curr_test = TEST_NORMAL; curr_test < TEST_NTESTS; curr_test++) {
+ for(curr_test = TEST_NORMAL; curr_test < TEST_NTESTS; H5_INC_ENUM(test_type_t, curr_test)) {
switch(curr_test) {
-
case TEST_NORMAL: /* set alignment = 1024 */
if(H5Pset_alignment(new_fapl, (hsize_t)0, (hsize_t)TEST_ALIGN1024) < 0)
TEST_ERROR
@@ -7273,6 +7317,7 @@ main(void)
TEST_ERROR
break;
+ case TEST_NTESTS:
default:
TEST_ERROR;
break;
@@ -7294,7 +7339,7 @@ main(void)
/* tests for file space management */
nerrors += test_filespace_drivers(fapl);
- if (H5Pclose(new_fapl) < 0)
+ if(H5Pclose(new_fapl) < 0)
FAIL_STACK_ERROR
h5_cleanup(FILENAME, fapl);
@@ -7302,13 +7347,14 @@ main(void)
goto error;
puts("All free-space manager tests for file memory passed.");
- return (0);
+ return(0);
error:
puts("*** TESTS FAILED ***");
H5E_BEGIN_TRY {
H5Pclose(fapl);
+ H5Pclose(new_fapl);
} H5E_END_TRY;
- return (1);
+ return(1);
} /* main() */
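
Several loops above switch from a plain integer counter to H5_INC_ENUM(); that internal helper (from H5private.h) increments an enum variable through an explicit cast, which keeps strict compilers from warning about implicit int-to-enum conversion. Roughly, the pattern is:

    /* H5_INC_ENUM(TYPE, VAR) expands to approximately (VAR) = (TYPE)((VAR) + 1) */
    H5FD_mem_t mt;

    for(mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, mt))
        memb_fapl[mt] = H5P_DEFAULT;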
diff --git a/test/ohdr.c b/test/ohdr.c
index dad06cf..109d59c 100644
--- a/test/ohdr.c
+++ b/test/ohdr.c
@@ -68,6 +68,9 @@ test_cont(char *filename, hid_t fapl)
TESTING("object header continuation block");
+ HDmemset(&oh_locA, 0, sizeof(oh_locA));
+ HDmemset(&oh_locB, 0, sizeof(oh_locB));
+
/* Create the file to operate on */
if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) TEST_ERROR
if(NULL == (f = (H5F_t *)H5I_object(file))) FAIL_STACK_ERROR
@@ -77,13 +80,10 @@ test_cont(char *filename, hid_t fapl)
goto error;
}
- HDmemset(&oh_locA, 0, sizeof(oh_locA));
- HDmemset(&oh_locB, 0, sizeof(oh_locB));
-
- if(H5O_create(f, H5P_DATASET_XFER_DEFAULT, (size_t)H5O_MIN_SIZE, H5P_GROUP_CREATE_DEFAULT, &oh_locA/*out*/) < 0)
+ if(H5O_create(f, H5P_DATASET_XFER_DEFAULT, (size_t)H5O_MIN_SIZE, (size_t)0, H5P_GROUP_CREATE_DEFAULT, &oh_locA/*out*/) < 0)
FAIL_STACK_ERROR
- if(H5O_create(f, H5P_DATASET_XFER_DEFAULT, (size_t)H5O_MIN_SIZE, H5P_GROUP_CREATE_DEFAULT, &oh_locB/*out*/) < 0)
+ if(H5O_create(f, H5P_DATASET_XFER_DEFAULT, (size_t)H5O_MIN_SIZE, (size_t)0, H5P_GROUP_CREATE_DEFAULT, &oh_locB/*out*/) < 0)
FAIL_STACK_ERROR
time_new = 11111111;
@@ -107,6 +107,10 @@ test_cont(char *filename, hid_t fapl)
if(H5O_msg_create(&oh_locA, H5O_NAME_ID, 0, 0, &short_name, H5P_DATASET_XFER_DEFAULT) < 0)
FAIL_STACK_ERROR
+ if(1 != H5O_link(&oh_locA, 1, H5P_DATASET_XFER_DEFAULT))
+ FAIL_STACK_ERROR
+ if(1 != H5O_link(&oh_locB, 1, H5P_DATASET_XFER_DEFAULT))
+ FAIL_STACK_ERROR
if(H5AC_flush(f, H5P_DATASET_XFER_DEFAULT) < 0)
FAIL_STACK_ERROR
if(H5O_expunge_chunks_test(&oh_locA, H5P_DATASET_XFER_DEFAULT) < 0)
@@ -148,6 +152,149 @@ error:
return -1;
} /* test_cont() */
+/*
+ * Verify that object headers are held in the cache until they are linked
+ * to a location in the graph, or assigned an ID. This is done by
+ * creating an object header, then trying to force it out of the cache by
+ * creating local heaps until the cache fills up, then modifying the object
+ * header. The refcount on the object header is then checked to verify
+ * that the object header has remained in the cache.
+ */
+static herr_t
+test_ohdr_cache(char *filename, hid_t fapl)
+{
+ hid_t file = -1; /* File ID */
+ hid_t my_fapl; /* FAPL ID */
+ hid_t my_dxpl; /* DXPL ID */
+ H5AC_cache_config_t mdc_config; /* Metadata cache configuration info */
+ H5F_t *f = NULL; /* File handle */
+ H5HL_t *lheap, *lheap2, *lheap3; /* Pointer to local heaps */
+ haddr_t lheap_addr, lheap_addr2, lheap_addr3; /* Local heap addresses */
+ H5O_loc_t oh_loc; /* Object header location */
+ time_t time_new; /* Time value for modification time message */
+ unsigned rc; /* Refcount for object */
+
+ TESTING("object header creation in cache");
+
+ /* Make a copy of the FAPL */
+ if((my_fapl = H5Pcopy(fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Tweak down the size of the metadata cache to only 64K */
+ mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ if(H5Pget_mdc_config(my_fapl, &mdc_config) < 0)
+ FAIL_STACK_ERROR
+ mdc_config.set_initial_size = TRUE;
+ mdc_config.initial_size = 32 * 1024;
+ mdc_config.max_size = 64 * 1024;
+ mdc_config.min_size = 8 * 1024;
+ if(H5Pset_mdc_config(my_fapl, &mdc_config) < 0)
+ FAIL_STACK_ERROR
+
+ /* Make a copy of the default DXPL */
+ if((my_dxpl = H5Pcopy(H5P_DATASET_XFER_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create the file to operate on */
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, my_fapl)) < 0)
+ FAIL_STACK_ERROR
+ if(H5Pclose(my_fapl) < 0)
+ FAIL_STACK_ERROR
+ if(NULL == (f = (H5F_t *)H5I_object(file)))
+ FAIL_STACK_ERROR
+ if(H5AC_ignore_tags(f) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create object (local heap) that occupies most of cache */
+ if(H5HL_create(f, my_dxpl, (31 * 1024), &lheap_addr) < 0)
+ FAIL_STACK_ERROR
+
+ /* Protect local heap (which actually pins it in the cache) */
+ if(NULL == (lheap = H5HL_protect(f, my_dxpl, lheap_addr, H5AC_READ)))
+ FAIL_STACK_ERROR
+
+ /* Create an object header */
+ HDmemset(&oh_loc, 0, sizeof(oh_loc));
+ if(H5O_create(f, my_dxpl, (size_t)2048, (size_t)1, H5P_GROUP_CREATE_DEFAULT, &oh_loc/*out*/) < 0)
+ FAIL_STACK_ERROR
+
+ /* Query object header information */
+ rc = 0;
+ if(H5O_get_rc(&oh_loc, my_dxpl, &rc) < 0)
+ FAIL_STACK_ERROR
+ if(0 != rc)
+ TEST_ERROR
+
+ /* Create object (local heap) that occupies most of cache */
+ if(H5HL_create(f, my_dxpl, (31 * 1024), &lheap_addr2) < 0)
+ FAIL_STACK_ERROR
+
+ /* Protect local heap (which actually pins it in the cache) */
+ if(NULL == (lheap2 = H5HL_protect(f, my_dxpl, lheap_addr2, H5AC_READ)))
+ FAIL_STACK_ERROR
+
+ /* Unprotect local heap (which actually unpins it from the cache) */
+ if(H5HL_unprotect(lheap2) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create object header message in new object header */
+ time_new = 11111111;
+ if(H5O_msg_create(&oh_loc, H5O_MTIME_NEW_ID, 0, 0, &time_new, my_dxpl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create object (local heap) that occupies most of cache */
+ if(H5HL_create(f, my_dxpl, (31 * 1024), &lheap_addr3) < 0)
+ FAIL_STACK_ERROR
+
+ /* Protect local heap (which actually pins it in the cache) */
+ if(NULL == (lheap3 = H5HL_protect(f, my_dxpl, lheap_addr3, H5AC_READ)))
+ FAIL_STACK_ERROR
+
+ /* Unprotect local heap (which actually unpins it from the cache) */
+ if(H5HL_unprotect(lheap3) < 0)
+ FAIL_STACK_ERROR
+
+ /* Query object header information */
+ /* (Note that this is somewhat of a weak test, since it doesn't actually
+ * verify that the object header was evicted from the cache, but it's
+ * very difficult to verify when an entry is evicted from the cache in
+ * a non-invasive way -QAK)
+ */
+ rc = 0;
+ if(H5O_get_rc(&oh_loc, my_dxpl, &rc) < 0)
+ FAIL_STACK_ERROR
+ if(0 != rc)
+ TEST_ERROR
+
+ /* Decrement reference count on object header */
+ if(H5O_dec_rc_by_loc(&oh_loc, my_dxpl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close object header created */
+ if(H5O_close(&oh_loc) < 0)
+ FAIL_STACK_ERROR
+
+ /* Unprotect local heap (which actually unpins it from the cache) */
+ if(H5HL_unprotect(lheap) < 0)
+ FAIL_STACK_ERROR
+
+ if(H5Pclose(my_dxpl) < 0)
+ FAIL_STACK_ERROR
+ if(H5Fclose(file) < 0)
+ FAIL_STACK_ERROR
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Fclose(file);
+ } H5E_END_TRY;
+
+ return -1;
+} /* test_ohdr_cache() */
+
/*-------------------------------------------------------------------------
* Function: main
@@ -216,7 +363,7 @@ main(void)
*/
TESTING("object header creation");
HDmemset(&oh_loc, 0, sizeof(oh_loc));
- if(H5O_create(f, H5P_DATASET_XFER_DEFAULT, (size_t)64, H5P_GROUP_CREATE_DEFAULT, &oh_loc/*out*/) < 0)
+ if(H5O_create(f, H5P_DATASET_XFER_DEFAULT, (size_t)64, (size_t)0, H5P_GROUP_CREATE_DEFAULT, &oh_loc/*out*/) < 0)
FAIL_STACK_ERROR
PASSED();
@@ -226,6 +373,8 @@ main(void)
time_new = 11111111;
if(H5O_msg_create(&oh_loc, H5O_MTIME_NEW_ID, 0, 0, &time_new, H5P_DATASET_XFER_DEFAULT) < 0)
FAIL_STACK_ERROR
+ if(1 != H5O_link(&oh_loc, 1, H5P_DATASET_XFER_DEFAULT))
+ FAIL_STACK_ERROR
if(H5AC_flush(f, H5P_DATASET_XFER_DEFAULT) < 0)
FAIL_STACK_ERROR
if(H5AC_expunge_entry(f, H5P_DATASET_XFER_DEFAULT, H5AC_OHDR, oh_loc.addr, H5AC__NO_FLAGS_SET) < 0)
@@ -378,12 +527,16 @@ main(void)
*/
TESTING("locking messages");
HDmemset(&oh_loc, 0, sizeof(oh_loc));
- if(H5O_create(f, H5P_DATASET_XFER_DEFAULT, (size_t)64, H5P_GROUP_CREATE_DEFAULT, &oh_loc/*out*/) < 0)
+ if(H5O_create(f, H5P_DATASET_XFER_DEFAULT, (size_t)64, (size_t)0, H5P_GROUP_CREATE_DEFAULT, &oh_loc/*out*/) < 0)
+ FAIL_STACK_ERROR
+ if(1 != H5O_link(&oh_loc, 1, H5P_DATASET_XFER_DEFAULT))
FAIL_STACK_ERROR
/* Create second object header, to guarantee that first object header uses multiple chunks */
HDmemset(&oh_loc2, 0, sizeof(oh_loc2));
- if(H5O_create(f, H5P_DATASET_XFER_DEFAULT, (size_t)64, H5P_GROUP_CREATE_DEFAULT, &oh_loc2/*out*/) < 0)
+ if(H5O_create(f, H5P_DATASET_XFER_DEFAULT, (size_t)64, (size_t)0, H5P_GROUP_CREATE_DEFAULT, &oh_loc2/*out*/) < 0)
+ FAIL_STACK_ERROR
+ if(1 != H5O_link(&oh_loc2, 1, H5P_DATASET_XFER_DEFAULT))
FAIL_STACK_ERROR
/* Fill object header with messages, creating multiple chunks */
@@ -452,12 +605,16 @@ main(void)
/* Open first object header */
HDmemset(&oh_loc, 0, sizeof(oh_loc));
- if(H5O_create(f, H5P_DATASET_XFER_DEFAULT, (size_t)64, H5P_GROUP_CREATE_DEFAULT, &oh_loc/*out*/) < 0)
+ if(H5O_create(f, H5P_DATASET_XFER_DEFAULT, (size_t)64, (size_t)0, H5P_GROUP_CREATE_DEFAULT, &oh_loc/*out*/) < 0)
+ FAIL_STACK_ERROR
+ if(1 != H5O_link(&oh_loc, 1, H5P_DATASET_XFER_DEFAULT))
FAIL_STACK_ERROR
/* Create second object header, to guarantee that first object header uses multiple chunks */
HDmemset(&oh_loc2, 0, sizeof(oh_loc2));
- if(H5O_create(f, H5P_DATASET_XFER_DEFAULT, (size_t)64, H5P_GROUP_CREATE_DEFAULT, &oh_loc2/*out*/) < 0)
+ if(H5O_create(f, H5P_DATASET_XFER_DEFAULT, (size_t)64, (size_t)0, H5P_GROUP_CREATE_DEFAULT, &oh_loc2/*out*/) < 0)
+ FAIL_STACK_ERROR
+ if(1 != H5O_link(&oh_loc2, 1, H5P_DATASET_XFER_DEFAULT))
FAIL_STACK_ERROR
/* Add message to move to object header */
@@ -632,6 +789,10 @@ main(void)
/* Close the file we created */
if(H5Fclose(file) < 0)
TEST_ERROR
+
+ /* Test object header creation metadata cache issues */
+ if(test_ohdr_cache(filename, fapl) < 0)
+ TEST_ERROR
} /* end for */
puts("All object header tests passed.");
diff --git a/test/testerror.sh.in b/test/testerror.sh.in
index 7f9657a..440be4f 100644
--- a/test/testerror.sh.in
+++ b/test/testerror.sh.in
@@ -71,6 +71,8 @@ TEST() {
-e 's/line [0-9]*/line (number)/' \
-e 's/v[1-9]*\.[0-9]*\./version (number)\./' \
-e 's/[1-9]*\.[0-9]*\.[0-9]*[^)]*/version (number)/' \
+ -e 's/H5Eget_auto[1-2]*/H5Eget_auto(1 or 2)/' \
+ -e 's/H5Eset_auto[1-2]*/H5Eset_auto(1 or 2)/' \
$actual_err > $actual_ext
cat $actual_ext >> $actual
diff --git a/test/testfiles/err_compat_1 b/test/testfiles/err_compat_1
index 032e7bc..e2b37ab 100644
--- a/test/testfiles/err_compat_1
+++ b/test/testfiles/err_compat_1
@@ -1,7 +1,7 @@
#############################
Expected output for err_compat
#############################
-Testing error API based on data I/O All error API tests passed.
+Testing error API H5Eset/get_auto Testing error API based on data I/O All error API tests passed.
This program tests the Error API compatible with HDF5 version (number). There're supposed to be some error messages
********* Print error stack in HDF5 default way *********
HDF5-DIAG: Error detected in HDF5 (version (number)) thread (IDs):
@@ -15,10 +15,46 @@ HDF5-DIAG: Error detected in HDF5 (version (number)) thread (IDs):
minor: Bad value
HDF5-DIAG: Error detected in HDF5 (version (number)) thread (IDs):
+ #000: (file name) line (number) in H5Dcreate2(): not a location ID
+ major: Invalid arguments to routine
+ minor: Inappropriate type
+ #001: (file name) line (number) in H5G_loc(): invalid object ID
+ major: Invalid arguments to routine
+ minor: Bad value
+
+********* Print error stack in customized way *********
+ error #000: (file name) in H5G_loc(): line (number)
+ major: Invalid arguments to routine
+ minor: Bad value
+ error #001: (file name) in H5Dcreate2(): line (number)
+ major: Invalid arguments to routine
+ minor: Inappropriate type
+
+********* Print error stack in customized way *********
+ error #000: (file name) in H5Eget_auto(1 or 2)(): line (number)
+ major: Error API
+ minor: Can't get value
+
+********* Print error stack in customized way *********
+ error #000: (file name) in H5G_loc(): line (number)
+ major: Invalid arguments to routine
+ minor: Bad value
+ error #001: (file name) in H5Dcreate2(): line (number)
+ major: Invalid arguments to routine
+ minor: Inappropriate type
+HDF5-DIAG: Error detected in HDF5 (version (number)) thread (IDs):
+ #000: (file name) line (number) in H5Dcreate2(): not a location ID
+ major: Invalid arguments to routine
+ minor: Inappropriate type
+ #001: (file name) line (number) in H5G_loc(): invalid object ID
+ major: Invalid arguments to routine
+ minor: Bad value
+
+HDF5-DIAG: Error detected in HDF5 (version (number)) thread (IDs):
#000: (file name) line (number) in main(): Error test failed
major: Error API
minor: Unrecognized message
- #001: (file name) line (number) in test_error(): H5Dwrite shouldn't succeed
+ #001: (file name) line (number) in test_error2(): H5Dwrite shouldn't succeed
major: Error API
minor: Write failed
#002: (file name) line (number) in H5Dwrite(): not a dataset
diff --git a/test/testfiles/links_env.out b/test/testfiles/links_env.out
new file mode 100644
index 0000000..3ca9b99
--- /dev/null
+++ b/test/testfiles/links_env.out
@@ -0,0 +1,6 @@
+#############################
+Expected output for links_env
+#############################
+Testing external links via environment variable PASSED
+Testing external links via environment variable (w/new group format) PASSED
+All external Link (HDF5_EXT_PREFIX) tests passed.
diff --git a/test/testframe.c b/test/testframe.c
index 082a27f..6fbace1 100644
--- a/test/testframe.c
+++ b/test/testframe.c
@@ -124,7 +124,7 @@ AddTest(const char *TheName, void (*TheCall) (void), void (*Cleanup) (void), con
*/
void TestInit(const char *ProgName, void (*private_usage)(void), int (*private_parser)(int ac, char *av[]))
{
-#if !(defined MAC || defined __MWERKS__ || defined SYMANTEC_C)
+#if !(defined MAC)
/* Un-buffer the stdout and stderr */
setbuf(stderr, NULL);
setbuf(stdout, NULL);
diff --git a/test/testlinks_env.sh.in b/test/testlinks_env.sh.in
new file mode 100644
index 0000000..e71dfc8
--- /dev/null
+++ b/test/testlinks_env.sh.in
@@ -0,0 +1,46 @@
+#! /bin/sh
+#
+# Copyright by The HDF Group.
+# Copyright by the Board of Trustees of the University of Illinois.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the files COPYING and Copyright.html. COPYING can be found at the root
+# of the source code distribution tree; Copyright.html can be found at the
+# root level of an installed copy of the electronic HDF5 document set and
+# is linked from the top-level documents page. It can also be found at
+# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
+# access to either file, you may request a copy from help@hdfgroup.org.
+#
+# Test for external link with environment variable: HDF5_EXT_PREFIX
+
+nerrors=0
+
+# The build (current) directory might be different than the source directory.
+if test -z "$srcdir"; then
+ srcdir=.
+fi
+
+##############################################################################
+##############################################################################
+### T H E T E S T S ###
+##############################################################################
+##############################################################################
+
+# test for external links with HDF5_EXT_PREFIX
+echo "Testing external link with HDF5_EXT_PREFIX"
+TEST_NAME=links_env # The test name
+TEST_BIN=`pwd`/$TEST_NAME # The path of the test binary
+ENVCMD="env HDF5_EXT_PREFIX=.:tmp" # The environment variable & value
+#
+# Run the test
+$ENVCMD $TEST_BIN
+exitcode=$?
+if [ $exitcode -eq 0 ]; then
+ echo "Test for HDF5_EXT_PREFIX PASSED"
+ else
+ nerrors="`expr $nerrors + 1`"
+ echo "***Error encountered for HDF5_EXT_PREFIX test***"
+fi
+exit $nerrors
diff --git a/test/th5o.c b/test/th5o.c
index 6091776..17619ee 100644
--- a/test/th5o.c
+++ b/test/th5o.c
@@ -898,6 +898,337 @@ test_h5o_link(void)
/****************************************************************
**
+** test_h5o_comment(): Test H5Oset(get)_comment functions.
+**
+****************************************************************/
+static void
+test_h5o_comment(void)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t grp, dset, dtype, dspace; /* Object identifiers */
+ hid_t attr_space, attr_id;
+ hsize_t dims[RANK];
+ hsize_t attr_dims = 1;
+ int attr_value = 5;
+ const char *file_comment = "file comment";
+ const char *grp_comment = "group comment";
+ const char *dset_comment = "dataset comment";
+ const char *dtype_comment = "datatype comment";
+ char check_comment[64];
+ ssize_t comment_len = 0;
+ herr_t ret; /* Value returned from API calls */
+ int ret_value;
+
+ /* Create a new HDF5 file */
+ fid = H5Fcreate(TEST_FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create an attribute for the file */
+ attr_space = H5Screate_simple(1, &attr_dims, NULL);
+ CHECK(attr_space, FAIL, "H5Screate_simple");
+ attr_id = H5Acreate2(fid, "file attribute", H5T_NATIVE_INT, attr_space, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr_id, FAIL, "H5Acreate2");
+ ret = H5Awrite(attr_id, H5T_NATIVE_INT, &attr_value);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Putting a comment on the file through its attribute */
+ ret = H5Oset_comment(attr_id, file_comment);
+ CHECK(ret, FAIL, "H5Oset_comment");
+
+ ret = H5Sclose(attr_space);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Aclose(attr_id);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Create a group, dataset, and committed datatype within the file */
+ /* Create the group */
+ grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(grp, FAIL, "H5Gcreate2");
+
+ /* Putting a comment on the group */
+ ret = H5Oset_comment(grp, grp_comment);
+ CHECK(ret, FAIL, "H5Oset_comment");
+
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Commit the type inside the group */
+ dtype = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(dtype, FAIL, "H5Tcopy");
+ ret = H5Tcommit2(fid, "group/datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Putting a comment on the committed data type */
+ ret = H5Oset_comment(dtype, dtype_comment);
+ CHECK(ret, FAIL, "H5Oset_comment");
+
+ ret = H5Tclose(dtype);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Create the data space for the dataset. */
+ dims[0] = DIM0;
+ dims[1] = DIM1;
+ dspace = H5Screate_simple(RANK, dims, NULL);
+ CHECK(dspace, FAIL, "H5Screate_simple");
+
+ /* Create the dataset. */
+ dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dcreate2");
+
+ /* Putting a comment on the dataset */
+ ret = H5Oset_comment(dset, dset_comment);
+ CHECK(ret, FAIL, "H5Oset_comment");
+
+ /* Putting a comment on the dataspace. It's supposed to fail. */
+ H5E_BEGIN_TRY {
+ ret = H5Oset_comment(dspace, "dataspace comment");
+ } H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Oset_comment");
+
+ /* Close the file */
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+
+ /* Now make sure that the comments are correct on all 4 types of objects */
+ /* Open file */
+ fid = H5Fopen(TEST_FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Getting the comment on the file and verifying it */
+ comment_len = H5Oget_comment(fid, NULL, (size_t)0);
+ CHECK(comment_len, FAIL, "H5Oget_comment");
+
+ ret = H5Oget_comment(fid, check_comment, (size_t)comment_len+1);
+ CHECK(ret, FAIL, "H5Oget_comment");
+
+ ret_value = HDstrcmp(file_comment, check_comment);
+ VERIFY(ret_value, 0, "H5Oget_comment");
+
+ /* Open the group */
+ grp = H5Gopen2(fid, "group", H5P_DEFAULT);
+ CHECK(grp, FAIL, "H5Gopen2");
+
+ /* Getting the comment on the group and verifying it */
+ comment_len = H5Oget_comment(grp, NULL, (size_t)0);
+ CHECK(comment_len, FAIL, "H5Oget_comment");
+
+ ret = H5Oget_comment(grp, check_comment, (size_t)comment_len+1);
+ CHECK(ret, FAIL, "H5Oget_comment");
+
+ ret_value = HDstrcmp(grp_comment, check_comment);
+ VERIFY(ret_value, 0, "H5Oget_comment");
+
+ /* Open the datatype */
+ dtype = H5Topen2(fid, "group/datatype", H5P_DEFAULT);
+ CHECK(dtype, FAIL, "H5Topen2");
+
+ /* Getting the comment on the datatype and verifying it */
+ comment_len = H5Oget_comment(dtype, NULL, (size_t)0);
+ CHECK(comment_len, FAIL, "H5Oget_comment");
+
+ ret = H5Oget_comment(dtype, check_comment, (size_t)comment_len+1);
+ CHECK(ret, FAIL, "H5Oget_comment");
+
+ ret_value = HDstrcmp(dtype_comment, check_comment);
+ VERIFY(ret_value, 0, "H5Oget_comment");
+
+ /* Open the dataset */
+ dset = H5Dopen2(fid, "dataset", H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dopen2");
+
+ /* Getting the comment on the dataset and verifying it */
+ comment_len = H5Oget_comment(dset, NULL, (size_t)0);
+ CHECK(comment_len, FAIL, "H5Oget_comment");
+
+ ret = H5Oget_comment(dset, check_comment, (size_t)comment_len+1);
+ CHECK(ret, FAIL, "H5Oget_comment");
+
+ ret_value = HDstrcmp(dset_comment, check_comment);
+ VERIFY(ret_value, 0, "H5Oget_comment");
+
+
+ /* Close the IDs */
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Tclose(dtype);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* test_h5o_comment() */
+
+
+/****************************************************************
+**
+** test_h5o_comment_by_name(): Test H5Oset(get)_comment_by_name functions.
+**
+****************************************************************/
+static void
+test_h5o_comment_by_name(void)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t grp, dset, dtype, dspace; /* Object identifiers */
+ hid_t attr_space, attr_id;
+ hsize_t dims[RANK];
+ hsize_t attr_dims = 1;
+ int attr_value = 5;
+ const char *file_comment = "file comment by name";
+ const char *grp_comment = "group comment by name";
+ const char *dset_comment = "dataset comment by name";
+ const char *dtype_comment = "datatype comment by name";
+ char check_comment[64];
+ ssize_t comment_len = 0;
+ herr_t ret; /* Value returned from API calls */
+ int ret_value;
+
+ /* Create a new HDF5 file */
+ fid = H5Fcreate(TEST_FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create an attribute for the file */
+ attr_space = H5Screate_simple(1, &attr_dims, NULL);
+ CHECK(attr_space, FAIL, "H5Screate_simple");
+ attr_id = H5Acreate2(fid, "file attribute", H5T_NATIVE_INT, attr_space, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr_id, FAIL, "H5Acreate2");
+ ret = H5Awrite(attr_id, H5T_NATIVE_INT, &attr_value);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Putting a comment on the file through its attribute */
+ ret = H5Oset_comment_by_name(attr_id, ".", file_comment, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oset_comment_by_name");
+
+ ret = H5Sclose(attr_space);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Aclose(attr_id);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Create a group, dataset, and committed datatype within the file */
+ /* Create the group */
+ grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(grp, FAIL, "H5Gcreate2");
+
+ /* Putting a comment on the group */
+ ret = H5Oset_comment_by_name(fid, "group", grp_comment, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oset_comment_by_name");
+
+ /* Commit the type inside the group */
+ dtype = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(dtype, FAIL, "H5Tcopy");
+ ret = H5Tcommit2(fid, "group/datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Putting a comment on the committed data type */
+ ret = H5Oset_comment_by_name(grp, "datatype", dtype_comment, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oset_comment_by_name");
+
+ ret = H5Tclose(dtype);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Create the data space for the dataset. */
+ dims[0] = DIM0;
+ dims[1] = DIM1;
+ dspace = H5Screate_simple(RANK, dims, NULL);
+ CHECK(dspace, FAIL, "H5Screate_simple");
+
+ /* Create the dataset. */
+ dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dcreate2");
+
+ /* Putting a comment on the dataset */
+ ret = H5Oset_comment_by_name(fid, "dataset", dset_comment, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oset_comment_by_name");
+
+ /* Putting a comment on the dataspace. It's supposed to fail. */
+ H5E_BEGIN_TRY {
+ ret = H5Oset_comment_by_name(dspace, ".", "dataspace comment", H5P_DEFAULT);
+ } H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Oset_comment");
+
+ /* Close the file */
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Now make sure that the comments are correct on all 4 types of objects */
+ /* Open file */
+ fid = H5Fopen(TEST_FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Getting the comment on the file and verifying it */
+ comment_len = H5Oget_comment_by_name(fid, ".", NULL, (size_t)0, H5P_DEFAULT);
+ CHECK(comment_len, FAIL, "H5Oget_comment_by_name");
+
+ ret = H5Oget_comment_by_name(fid, ".", check_comment, (size_t)comment_len+1, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_comment_by_name");
+
+ ret_value = HDstrcmp(file_comment, check_comment);
+ VERIFY(ret_value, 0, "H5Oget_comment_by_name");
+
+ /* Open the group */
+ grp = H5Gopen2(fid, "group", H5P_DEFAULT);
+ CHECK(grp, FAIL, "H5Gopen2");
+
+ /* Getting the comment on the group and verifying it */
+ comment_len = H5Oget_comment_by_name(fid, "group", NULL, (size_t)0, H5P_DEFAULT);
+ CHECK(comment_len, FAIL, "H5Oget_comment_by_name");
+
+ ret = H5Oget_comment_by_name(fid, "group", check_comment, (size_t)comment_len+1, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_comment_by_name");
+
+ ret_value = HDstrcmp(grp_comment, check_comment);
+ VERIFY(ret_value, 0, "H5Oget_comment_by_name");
+
+ /* Getting the comment on the datatype and verifying it */
+ comment_len = H5Oget_comment_by_name(grp, "datatype", NULL, (size_t)0, H5P_DEFAULT);
+ CHECK(comment_len, FAIL, "H5Oget_comment_by_name");
+
+ ret = H5Oget_comment_by_name(grp, "datatype", check_comment, (size_t)comment_len+1, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_comment");
+
+ ret_value = HDstrcmp(dtype_comment, check_comment);
+ VERIFY(ret_value, 0, "H5Oget_comment_by_name");
+
+ /* Getting the comment on the dataset and verifying it */
+ comment_len = H5Oget_comment_by_name(fid, "dataset", NULL, (size_t)0, H5P_DEFAULT);
+ CHECK(comment_len, FAIL, "H5Oget_comment_by_name");
+
+ ret = H5Oget_comment_by_name(fid, "dataset", check_comment, (size_t)comment_len+1, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_comment_by_name");
+
+ ret_value = HDstrcmp(dset_comment, check_comment);
+ VERIFY(ret_value, 0, "H5Oget_comment_by_name");
+
+ /* Close the IDs */
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* test_h5o_comment_by_name() */
+
+
+
+/****************************************************************
+**
** test_h5o(): Main H5O (generic object) testing routine.
**
****************************************************************/
@@ -913,6 +1244,8 @@ test_h5o(void)
test_h5o_refcount(); /* Test incrementing and decrementing reference count */
test_h5o_plist(); /* Test object creation properties */
test_h5o_link(); /* Test object link routine */
+ test_h5o_comment(); /* Test routines for comment */
+ test_h5o_comment_by_name(); /* Test routines for comment by name */
} /* test_h5o() */
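
Both new comment tests follow the usual two-call idiom for H5Oget_comment()/H5Oget_comment_by_name(): query the length first, then retrieve the text. With a heap buffer instead of the fixed 64-byte array used above, the pattern looks like this sketch (obj_id stands for any open object identifier; error checks omitted):

    ssize_t len = H5Oget_comment(obj_id, NULL, (size_t)0);    /* length, not counting the NUL */
    char   *buf = (char *)HDmalloc((size_t)len + 1);

    H5Oget_comment(obj_id, buf, (size_t)len + 1);             /* second call fetches the text */
    HDfree(buf);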
diff --git a/test/vms_data.h5 b/test/vms_data.h5
index 9c243ff..14aeef2 100644
--- a/test/vms_data.h5
+++ b/test/vms_data.h5
Binary files differ